/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

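/*
 * Illustrative sketch of the buffered-write ordering documented above.
 * Not part of the original file; the helper name is made up, and the
 * transaction/page-lock steps are elided to a comment.
 */
#if 0
static void example_buffered_write_locking(struct inode *inode)
{
	sb_start_write(inode->i_sb);	/* freeze protection first */
	inode_lock(inode);		/* i_mutex second */
	/* ... transaction start -> page lock -> i_data_sem (rw) ... */
	inode_unlock(inode);
	sb_end_write(inode->i_sb);
}
#endif
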
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

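/*
 * Illustrative pattern for the checksum helpers above (a sketch, not
 * part of the original file): verify when the superblock is read,
 * re-seal with ext4_superblock_csum_set() after any field changes.
 */
#if 0
static void example_bump_mount_count(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_superblock_csum_verify(sb, es))
		return;				/* don't touch a bad sb */
	le16_add_cpu(&es->s_mnt_count, 1);	/* sample field update */
	ext4_superblock_csum_set(sb);		/* recompute s_checksum */
}
#endif
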
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}

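/*
 * Illustrative use of the fallback allocators above (a sketch, not part
 * of the original file): a table that can outgrow what kmalloc() will
 * satisfy is allocated here and later released with kvfree(), which
 * handles both the kmalloc() and __vmalloc() cases.
 */
#if 0
static void *example_alloc_table(size_t nr_entries)
{
	return ext4_kvzalloc(nr_entries * sizeof(void *), GFP_KERNEL);
}
#endif
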
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

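/*
 * Illustrative round trip through the lo/hi accessors above (a sketch,
 * not part of the original file; the sample block number is arbitrary).
 * With 64-bit descriptors, 0x100002000 splits into lo = 0x00002000 and
 * hi = 0x1, and the getter reassembles the original value.
 */
#if 0
static void example_bitmap_round_trip(struct super_block *sb,
				      struct ext4_group_desc *bg)
{
	ext4_fsblk_t blk = 0x100002000ULL;

	ext4_block_bitmap_set(sb, bg, blk);
	BUG_ON(ext4_block_bitmap(sb, bg) != blk);
}
#endif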

static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super) will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

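/*
 * Illustrative producer side for the callback loop above (a sketch,
 * assuming the ext4_journal_callback_add() helper from ext4_jbd2.h):
 * callers queue an entry on the running handle, and jce->jce_func is
 * invoked from ext4_journal_commit_callback() once the transaction
 * commits or aborts.
 */
#if 0
static void example_on_commit(struct super_block *sb,
			      struct ext4_journal_cb_entry *jce, int rc)
{
	/* rc is non-zero if the journal was aborted */
}

static void example_queue_commit_work(handle_t *handle,
				      struct ext4_journal_cb_entry *jce)
{
	ext4_journal_callback_add(handle, example_on_commit, jce);
}
#endif
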
/* Deal with the reporting of failure conditions on a filesystem such as
 * detected inconsistencies or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

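/*
 * Illustrative caller of ext4_decode_error() (a sketch, not part of the
 * original file): the 16-byte scratch buffer catches errnos that have
 * no dedicated string.
 */
#if 0
static void example_report_errno(struct super_block *sb, int err)
{
	char nbuf[16];
	const char *errstr = ext4_decode_error(sb, err, nbuf);

	if (errstr)
		ext4_msg(sb, KERN_ERR, "%s", errstr);
}
#endif
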
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL &&
	    (sb->s_flags & MS_RDONLY))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if ((sb->s_flags & MS_RDONLY) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

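/*
 * Illustrative caller pattern for the group-locked error path above (a
 * sketch, not part of the original file): the group lock must already
 * be held, and the helper may drop and retake it around the error
 * handling, as described in the comment.
 */
#if 0
static void example_validate_group(struct super_block *sb, ext4_group_t grp)
{
	ext4_lock_group(sb, grp);
	if (0 /* some on-disk inconsistency was detected */)
		ext4_grp_locked_error(sb, grp, 0, 0,
				      "bitmap and descriptor disagree");
	ext4_unlock_group(sb, grp);
}
#endif
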
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	flush_workqueue(sbi->rsv_conversion_wq);
	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!(sb->s_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					     sizeof(struct ext4_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					     init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	fscrypt_put_encryption_info(inode, NULL);
#endif
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1115
					int fh_len, int fh_type)
Christoph Hellwig's avatar
Christoph Hellwig committed
1116 1117 1118
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - e.g. S_DAX may get disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/* Update inode->i_flags - e.g. S_DAX may get disabled */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static unsigned ext4_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
		EXT4_NAME_LEN;
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.is_encrypted		= ext4_encrypted_inode,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= ext4_max_namelen,
};
#else
static const struct fscrypt_operations ext4_cryptops = {
	.is_encrypted		= ext4_encrypted_inode,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

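/*
 * Illustrative input for the parser above (a sketch, not part of the
 * original file): mounting with -o sb=8193,ro makes get_sb_block()
 * return 8193 and advances *data past the comma so that "ro" is left
 * for the regular option parser.
 */
#if 0
static void example_parse_sb_option(void)
{
	char *data = "sb=8193,ro";
	ext4_fsblk_t blk = get_sb_block((void **)&data);

	/* now blk == 8193 and data points at "ro" */
}
#endif
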
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
1620
#endif
1621 1622
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1623
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
1624 1625 1626 1627 1628
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
1629 1630
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
1631
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1632 1633
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
1634 1635 1636 1637 1638 1639 1640
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
1641
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
1642
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
1643
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
1644 1645 1646 1647 1648 1649 1650 1651 1652
	{Opt_err, 0, 0}
};

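/*
 * Handle a single mount option.  Returns 1 if the option was consumed
 * (including options that are recognized but deliberately ignored) and
 * -1 on error; parse_options() aborts on any negative return.
 */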
static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= MS_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= MS_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~MS_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}

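/* Returns 1 on success, 0 if any option fails to parse or validate. */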
static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & MS_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

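/*
 * Update the superblock's mount-time state: bump the mount count,
 * refresh s_mtime, set the journal recovery flag, and nag about fsck
 * where the on-disk counters suggest it.  Returns MS_RDONLY when the
 * revision level forces a read-only mount, otherwise 0.
 */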
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return res;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

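/*
 * Seed the in-memory per-flex-group counters (free inodes, free
 * clusters, used directories) from the on-disk group descriptors.
 * Returns 1 on success, 0 on failure.
 */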
static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}

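/*
 * With metadata_csum the descriptor checksum is the low 16 bits of a
 * crc32c over the filesystem checksum seed, the group number and the
 * descriptor itself; pre-metadata_csum filesystems use the legacy
 * crc16 scheme below.
 */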
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!(sb->s_flags & MS_RDONLY)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);
			if (ret < 0)
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: error %d", ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (sb_dqopt(sb)->files[i])
			dquot_quota_off(sb, i);
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor.  We store extents as a
 * starting block plus a length, so the length of the extent covering
 * the maximum file size must also fit into the on-disk format
 * containers.  Given that the length is always one unit bigger than
 * the highest encodable block offset (because we count block 0 as
 * well), we have to lower s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
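/*
 * For example, with 4KiB blocks (blkbits == 12) on a 64-bit kernel,
 * res below becomes ((2^32 - 1) << 12), i.e. just under 16 TiB, which
 * is the familiar extent-format file size limit for 4KiB-block
 * filesystems.
 */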
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * CONFIG_LBDAF not being enabled implies that the inode
		 * i_block field represents total blocks in units of
		 * 512 bytes; 32 == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
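/*
 * For example, with 4KiB blocks (bits == 12) an indirect-mapped file
 * can address 12 direct + 2^10 single-, 2^20 double- and 2^30
 * triple-indirect blocks, so res works out to roughly 2^30 blocks of
 * 4KiB each -- on the order of 4 TiB -- before the i_blocks sector
 * limit is applied.
 */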
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

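/*
 * Map logical group descriptor block nr to its location on disk.
 * Without meta_bg (or for descriptor blocks below s_first_meta_bg)
 * the GDT simply follows the superblock; with meta_bg each descriptor
 * block lives inside the block group it describes.
 */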
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= MS_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(unsigned long arg)
{
	struct super_block *sb = (struct super_block *) arg;
	struct ext4_sb_info *sbi;
	struct ext4_super_block *es;

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}

/*
 * Remove lr_request from the request list and free the
 * request structure. Should be called with li_list_mtx held.
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such an fs is found, it runs the lazy initialization request
 * (ext4_run_li_request) and keeps track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, it computes
 * the next wakeup time and puts itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

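/* Spawn the ext4lazyinit kthread; called with ext4_li_mtx held. */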
static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it make sense to run itable init. thread or not.
 * If there is at least one uninitialized inode table, return
 * corresponding group number, else the loop goes through all
 * groups and return total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

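/*
 * Register (or refresh) this filesystem's lazy inode-table init
 * request and start the ext4lazyinit thread if it is not already
 * running.
 */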
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups ||
	    (sb->s_flags & MS_RDONLY) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

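/*
 * Select the jbd2 checksum features matching the ext4 mount options:
 * checksum v3 when metadata_csum is active, v1 otherwise, plus the
 * async-commit feature when journal_async_commit was requested.
 */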
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
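	/* s_overhead is read locklessly (e.g. by statfs); order the store */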
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path.  In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
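	/*
	 * Example: a 1 TiB filesystem with 4k blocks has ~268M clusters,
	 * so 2% would be ~5.4M and the 4096-cluster cap applies; the 2%
	 * figure only wins on filesystems smaller than ~800 MiB.
	 */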
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
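	/*
	 * e.g. the default sb_block of 1 with 4k blocks: byte offset 1024
	 * falls inside logical block 0, at offset 1024 within the buffer.
	 */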
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	if (ext4_has_feature_metadata_csum(sb) ||
	    ext4_has_feature_ea_inode(sb)) {
		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(sbi->s_chksum_driver)) {
			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
			ret = PTR_ERR(sbi->s_chksum_driver);
			sbi->s_chksum_driver = NULL;
			goto failed_mount;
		}
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		err = bdev_dax_supported(sb, blocksize);
		if (err)
			goto failed_mount;
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
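	/*
	 * Legacy htree hashes depend on the signedness of "char" on the CPU
	 * that created the directory; record which variant this filesystem
	 * uses so dx lookups stay stable across architectures.
	 */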
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (le32_to_cpu(es->s_log_cluster_size) >
		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid log cluster size: %u",
				 le32_to_cpu(es->s_log_cluster_size));
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_warning(sb, "fragment/cluster size (%d) != "
				     "block size (%d)", clustersize,
				     blocksize);
			clustersize = blocksize;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	setup_timer(&sbi->s_err_report, print_daily_error_info,
		(unsigned long) sb);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
	sb->s_cop = &ext4_cryptops;
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

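	/* recovery is needed if orphans are queued or the journal is dirty */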
	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
			set_opt(sb, ORDERED_DATA);
		else
			set_opt(sb, JOURNAL_DATA);
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
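		/* fall through */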
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					     unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

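	/* the journal device carries its own ext4 superblock at byte 1024 */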
	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb->s_flags & MS_RDONLY) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
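	/*
	 * Journal replay can rewrite the superblock buffer, so preserve the
	 * saved error information across jbd2_journal_load() and restore it
	 * afterwards.
	 */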
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
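	/*
	 * s_kbytes_written is a lifetime total: the value read at mount time
	 * plus the sectors written since (512-byte sectors, hence the >> 1
	 * to convert the delta to KiB).
	 */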
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) &&
	    sb->s_flags & MS_RDONLY) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function cannot bring the filesystem to a clean state
 * by itself.  It relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

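/*
 * Handle mount -o remount: re-parse the option string, reject changes
 * that cannot be made on a live filesystem, and perform ro<->rw
 * transitions.  On any failure the options saved in old_opts are
 * restored under the restore_opts label.
 */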
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & MS_LAZYTIME)
		sb->s_flags |= MS_LAZYTIME;

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
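/*
 * Clamp the numbers reported by statfs(2) to the project quota limits,
 * so a PROJINHERIT directory tree advertises its quota rather than the
 * size of the whole filesystem.
 */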
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif

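/*
 * ->statfs callback: block counts come from the percpu cluster counters;
 * metadata overhead is subtracted from f_blocks and the reserved blocks
 * from f_bavail, so the latter reflects what ordinary users can allocate.
 */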
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}

/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write. Otherwise there are possible deadlocks:
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()                  write_dquot()
 *   dquot_initialize()                         down(dqio_mutex)
 *     down(dqio_mutex)                    jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

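/*
 * Write a dquot to the quota file inside a correctly sized journal
 * transaction; dquot_commit() does the actual update.
 */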
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

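/*
 * With journaled quota (the quota feature, or user-supplied quota files
 * on a journaled filesystem) a dirty dquot must be written out
 * immediately through the journal; otherwise it is enough to mark the
 * dquot dirty and let quota_sync write it later.
 */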
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

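/*
 * Enable quota accounting on one of the hidden quota inodes referenced
 * from the superblock; only used on filesystems with the quota feature.
 */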
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	/* Drop our reference only after the lockdep annotation is reset */
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

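/*
 * Turn quota off.  For visible quota files (no quota feature) also drop
 * the NOATIME/IMMUTABLE flags set at quota-on time and refresh the
 * file's timestamps so userspace sees a normal file again.
 */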
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

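/* Support for the Q_GETNEXTQUOTA interface: delegate to the quota format. */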
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops	*ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

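/*
 * ext4 can also service ext2/ext3 filesystems when those drivers are not
 * built in; the *_feature_set_ok() helpers ensure we only claim a
 * filesystem whose feature set the requested personality understands.
 */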
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

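/*
 * Module init: bring up the shared subsystems in dependency order and
 * unwind them in reverse (the out* labels) if any step fails.
 */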
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)