/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/cleancache.h>
#include <asm/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

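/*
 * The superblock checksum is computed over everything up to the
 * s_checksum field itself, which is why the offset of that field is
 * used as the length.
 */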
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

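/*
 * Allocation helpers: try a physically contiguous kmalloc() first (with
 * __GFP_NOWARN so a failure stays quiet) and fall back to vmalloc().
 */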
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}

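/*
 * Block-group descriptor accessors.  The 64-bit fields are split into
 * _lo/_hi halves on disk; the _hi half is only valid when the
 * descriptor size is at least EXT4_MIN_DESC_SIZE_64BIT.
 */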
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}


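/*
 * Record the function, line, and time of the most recent error (and of
 * the first error, if none has been noted yet) in the on-disk
 * superblock; the caller is responsible for writing the superblock out.
 */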
static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

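/*
 * Run the callbacks queued on the transaction's t_private_list once jbd2
 * has committed it, dropping s_md_lock around each callback invocation.
 */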
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);
	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

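/*
 * Translate an errno (typically from the journalling layer) into a
 * human-readable string; unknown codes are formatted into the
 * caller-supplied nbuf, if one is given.
 */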
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL &&
	    (sb->s_flags & MS_RDONLY))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if ((sb->s_flags & MS_RDONLY) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

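/*
 * Debugging aid: print the on-disk orphan list head and every inode
 * still sitting on the in-memory orphan list.
 */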
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

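/*
 * Tear down everything attached to the in-core superblock at unmount
 * time: quotas, the writeback workqueue, the journal, sysfs entries,
 * counters, and the backing block devices.
 */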
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	flush_workqueue(sbi->rsv_conversion_wq);
	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!(sb->s_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_mb_cache) {
		ext4_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

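/*
 * Slab constructor: called once when an ext4_inode_info object is first
 * created in the cache, not on every allocation, so only state that must
 * survive object reuse is initialized here.
 */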
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					     sizeof(struct ext4_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					     init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

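/*
 * Final per-inode teardown on eviction: drop buffers, quota references,
 * preallocations, the cached extent status tree, and any jbd2 inode state.
 */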
void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	fscrypt_put_encryption_info(inode, NULL);
#endif
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

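/*
 * NFS export hook: write the inode's metadata back synchronously so that
 * a client-visible commit is actually durable on disk.
 */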
static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_key_prefix(struct inode *inode, u8 **key)
{
	*key = EXT4_SB(inode->i_sb)->key_prefix;
	return EXT4_SB(inode->i_sb)->key_prefix_size;
}

static int ext4_prepare_context(struct inode *inode)
{
	return ext4_convert_inline_data(inode);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle;
	int res, res2;

	/* fs_data is null when internally used. */
	if (fs_data) {
		res  = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
				len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
		}
		return res;
	}

	handle = ext4_journal_start(inode, EXT4_HT_MISC,
			ext4_jbd2_credits_xattr(inode));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
			EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
			len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);
	if (!res)
		res = res2;
	return res;
}

static int ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static unsigned ext4_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
		EXT4_NAME_LEN;
}

static struct fscrypt_operations ext4_cryptops = {
	.get_context		= ext4_get_context,
	.key_prefix		= ext4_key_prefix,
	.prepare_context	= ext4_prepare_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.is_encrypted		= ext4_encrypted_inode,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= ext4_max_namelen,
};
#else
static struct fscrypt_operations ext4_cryptops = {
	.is_encrypted		= ext4_encrypted_inode,
};
#endif

#ifdef CONFIG_QUOTA
static char *quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space = ext4_get_reserved_space,
	.write_dquot	= ext4_write_dquot,
	.acquire_dquot	= ext4_acquire_dquot,
	.release_dquot	= ext4_release_dquot,
	.mark_dirty	= ext4_mark_dquot_dirty,
	.write_info	= ext4_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= ext4_get_projid,
	.get_next_id	= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

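/*
 * Pull an "sb=<block>" override out of the mount options, consuming the
 * option text; returns 1 (the default superblock location) if none is
 * given.
 */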
static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
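/*
 * Remember the name of the journaled quota file for the given quota type;
 * changing it is refused while quota is already turned on.
 */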
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
1496
#define MOPT_STRING	0x0400
1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
1509 1510 1511 1512
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1513 1514
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
1515 1516 1517
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1518
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1519 1520
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1521
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1522
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1523
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1524
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
1525
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1526
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
1527 1528 1529
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
1530
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
1531
	 MOPT_NO_EXT2},
1532
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
1533
	 MOPT_NO_EXT2},
1534 1535 1536 1537 1538 1539 1540 1541 1542 1543
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
Ross Zwisler's avatar
Ross Zwisler committed
1544
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
1545
	{Opt_stripe, 0, MOPT_GTE0},
1546 1547
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
1548 1549 1550
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
1551 1552 1553 1554
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
1555 1556
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
Theodore Ts'o's avatar
Theodore Ts'o committed
1557
#ifdef CONFIG_EXT4_FS_POSIX_ACL
1558 1559
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
1560
#else
1561 1562
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
1563
#endif
1564 1565 1566 1567 1568 1569 1570
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
1571 1572
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_err, 0, 0}
};
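
/*
 * Illustrative walk-through (not part of the original source; the device
 * and mount point below are hypothetical): a command such as
 *
 *	mount -t ext4 -o nobarrier,commit=30,data=writeback /dev/sdb1 /mnt
 *
 * is split on commas by parse_options() below and each token is looked up
 * in ext4_mount_opts[]: "nobarrier" matches Opt_nobarrier and clears
 * EXT4_MOUNT_BARRIER, "commit=30" matches Opt_commit (MOPT_GTE0) and is
 * stored as a 30 second commit interval, and "data=writeback" matches
 * Opt_data_writeback (MOPT_DATAJ).
 */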

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= MS_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= MS_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~MS_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
			 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
			 "in data=ordered mode");
		return 0;
	}
	return 1;
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}
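
/*
 * Example (reading aid; the token pattern is assumed from the tokens
 * table earlier in this file): token2str(Opt_minix_df) would return the
 * bare pattern "minix_df". Patterns containing '=' are skipped above so
 * that value-carrying variants such as "commit=%u" never leak a
 * printf-style format into the options output.
 */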

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & MS_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");
	if (DUMMY_ENCRYPTION_ENABLED(sbi))
		SEQ_OPTS_PUTS("test_dummy_encryption");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return res;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = ext4_kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}
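
/*
 * Worked example (illustrative): with s_log_groups_per_flex == 4 (16
 * block groups per flex group) and ngroup == 1000, the last group falls
 * in flex group 999 >> 4 == 62, so 63 entries are needed; the byte size
 * is then rounded up to a power of two, which lets later, slightly
 * larger requests frequently fit in the array already allocated.
 */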

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}
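
/*
 * Worked example (illustrative): with s_log_groups_per_flex == 4, groups
 * 0..15 all map to flex group 0, so their free-inode, free-cluster and
 * used-dirs counts are accumulated into s_flex_groups[0]; an
 * s_log_groups_per_flex value outside 1..31 disables flex_bg accounting
 * altogether.
 */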

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}
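
/*
 * Sketch of the metadata_csum branch above (reading aid, not normative):
 * the crc32c is seeded with sbi->s_csum_seed, folded over the
 * little-endian group number, then over the descriptor up to
 * bg_checksum, then over a zeroed dummy standing in for the checksum
 * field itself, and finally over the descriptor tail when s_desc_size
 * extends past it; the stored value is the low 16 bits of the result.
 */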

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!(sb->s_flags & MS_RDONLY)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
2411
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * the filesystem has the quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old-style quota files */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ext4_truncate(inode);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor: we store extents as a
 * starting block plus a length, so the length of the extent covering the
 * maximum file size must also fit into the on-disk format containers.
 * Since the length is always one unit larger than the highest unit it
 * covers (block 0 counts as well), we have to lower s_maxbytes by one
 * fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents the total number of
		 * 512-byte blocks; 32 == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
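
/*
 * Worked example (illustrative): for 4KiB blocks (blkbits == 12) with
 * huge files enabled on a 64-bit kernel, upper_limit stays at
 * MAX_LFS_FILESIZE and
 *
 *	res = ((1LL << 32) - 1) << 12 == 16 TiB - 4 KiB
 *
 * i.e. the 32-bit ee_block container, minus the one-block adjustment
 * described above, caps extent-mapped files just short of 16 TiB.
 */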

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks.
		 * With EXT4_HUGE_FILE_FL set, i_blocks
		 * represents the total number of blocks in
		 * file system block size units.
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
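
/*
 * Worked example (illustrative): for bits == 12 (4KiB blocks) the dense
 * block-mapped limit computed above is
 *
 *	res = (12 + 2^10 + 2^20 + 2^30) blocks << 12
 *	    ~= 4 TiB + 4 GiB + 4 MiB + 48 KiB
 *
 * well below the huge-file upper_limit of (2^48 - 1) blocks, so res
 * itself is the cap for indirect-mapped files at this block size.
 */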

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
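
/*
 * Example (illustrative): without META_BG, or for nr below
 * s_first_meta_bg, descriptor block nr simply follows the superblock;
 * on a 4KiB-block filesystem with the superblock in block 0,
 * descriptor_loc(sb, 0, 2) returns block 3. With META_BG, descriptor nr
 * instead lives in block group s_desc_per_block * nr, right after that
 * group's backup superblock, if it has one.
 */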

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If the stripe size was specified via a mount option, use that value.
 * If the value specified at mount time is greater than the blocks per
 * group, fall back to the super block value. If the super block value is
 * also greater than blocks per group, return 0.
 * The allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
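
/*
 * Example (illustrative): mounting with "stripe=64" on a filesystem with
 * 32768 blocks per group yields 64; with no mount option, the
 * superblock's s_raid_stripe_width and then s_raid_stride are tried in
 * that order, and a value larger than blocks-per-group, or a degenerate
 * stripe of 1, results in 0, i.e. striping disabled.
 */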

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= MS_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(unsigned long arg)
{
	struct super_block *sb = (struct super_block *) arg;
	struct ext4_sb_info *sbi;
	struct ext4_super_block *es;

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
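
/*
 * Timing note (illustrative): on a request's first pass lr_timeout is 0,
 * so ext4_init_inode_table() is asked to issue a barrier and the
 * measured wall time is multiplied by s_li_wait_mult (set from the
 * init_itable=n mount option, EXT4_DEF_LI_WAIT_MULT otherwise) to become
 * the pacing interval; e.g. a zeroing pass that took 50ms with a
 * multiplier of 10 schedules the next group roughly 500ms out.
 */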

/*
 * Remove lr_request from the request list and free the
 * request structure. Should be called with li_list_mtx held.
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, it runs the lazy initialization request
 * (ext4_run_li_request) and keeps track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, it computes
 * the next wakeup time and puts itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * a new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number; otherwise the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups ||
	    (sb->s_flags & MS_RDONLY) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
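
/*
 * Summary (reading aid): filesystems with metadata_csum request journal
 * checksum v3 (JBD2_FEATURE_INCOMPAT_CSUM_V3); everything else falls
 * back to the v1 JBD2_FEATURE_COMPAT_CHECKSUM. ASYNC_COMMIT is only set
 * when the journal_async_commit mount option asked for it, and is
 * explicitly cleared otherwise.
 */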

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters, the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

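/*
 * Clamp s_want_extra_isize to what the on-disk inode size can actually
 * accommodate.  A sketch of the policy below: old 128-byte inodes get no
 * extra space at all; an unset or tiny value is raised to the in-core
 * default (honouring the superblock's want/min hints when the
 * extra_isize feature is present); and a value that would overflow the
 * inode falls back to that same default.
 */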
static void ext4_clamp_want_extra_isize(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	unsigned def_extra_isize = sizeof(struct ext4_inode) -
						EXT4_GOOD_OLD_INODE_SIZE;

	if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = 0;
		return;
	}
	if (sbi->s_want_extra_isize < 4) {
		sbi->s_want_extra_isize = def_extra_isize;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
	    (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size)) {
		sbi->s_want_extra_isize = def_extra_isize;
		ext4_msg(sb, KERN_INFO,
			 "required extra inode space not available");
	}
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;

	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
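	/*
	 * Worked example (illustrative only): a 1 TiB filesystem with 4k
	 * clusters has ~268M clusters, so the 2% figure (~5.4M) exceeds
	 * the 4096-cluster cap and about 16 MiB ends up reserved.
	 */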

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh, **group_desc;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
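	/*
	 * For instance (with the default sb_block of 1): on a 4k-block
	 * device, logical_sb_block becomes 0 and offset becomes 1024, so
	 * the superblock is read 1 KiB into the first block.
	 */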
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sb_block;
	}

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	if (ext4_has_feature_metadata_csum(sb)) {
		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(sbi->s_chksum_driver)) {
			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
			ret = PTR_ERR(sbi->s_chksum_driver);
			sbi->s_chksum_driver = NULL;
			goto failed_mount;
		}
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto failed_mount;
	}
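	/*
	 * Likely rationale for the limit below (an assumption, not stated
	 * here): a block holds only blocksize/4 32-bit block numbers, so
	 * more reserved GDT blocks than that could never be tracked from
	 * a single indirect block of the resize inode.
	 */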
	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		err = bdev_dax_supported(sb, blocksize);
		if (err)
			goto failed_mount;
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			goto failed_mount;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
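	/*
	 * Historical note: the original dx hash folded in signed char
	 * values, so its output differs between signed- and unsigned-char
	 * architectures.  The flags handled below record which variant a
	 * filesystem uses so directories stay readable across ports.
	 */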
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
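	/*
	 * e.g. a bigalloc filesystem with 4k blocks and 64k clusters has
	 * s_log_cluster_size - s_log_block_size == 4, i.e. 16 blocks per
	 * cluster; without bigalloc, clustersize must equal blocksize.
	 */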
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		goto failed_mount;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		ret = -EINVAL;
		goto failed_mount;
	}
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	rcu_assign_pointer(sbi->s_group_desc,
			   ext4_kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL));
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);


	for (i = 0; i < db_count; i++) {
		struct buffer_head *bh;

		block = descriptor_loc(sb, logical_sb_block, i);
		bh = sb_bread_unmovable(sb, block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
		rcu_read_lock();
		rcu_dereference(sbi->s_group_desc)[i] = bh;
		rcu_read_unlock();
	}
	sbi->s_gdb_count = db_count;
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	setup_timer(&sbi->s_err_report, print_daily_error_info,
		(unsigned long) sb);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
	sb->s_cop = &ext4_cryptops;
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
			set_opt(sb, ORDERED_DATA);
		else
			set_opt(sb, JOURNAL_DATA);
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}
	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	sbi->s_mb_cache = ext4_xattr_create_cache();
	if (!sbi->s_mb_cache) {
		ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
		goto failed_mount_wq;
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;

	ext4_clamp_want_extra_isize(sb);

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	ext4_superblock_csum_set(sb);
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		ext4_superblock_csum_set(sb);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	memcpy(sbi->key_prefix, EXT4_KEY_DESC_PREFIX,
				EXT4_KEY_DESC_PREFIX_SIZE);
	sbi->key_prefix_size = EXT4_KEY_DESC_PREFIX_SIZE;
#endif
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_mb_cache) {
		ext4_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < db_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					     unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

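/*
 * Locate and load the journal at mount time.  An inode journal and an
 * external device journal are mutually exclusive; recovery on a
 * read-only mount needs a writable device.  One reading of the code
 * below: the superblock's saved error region is preserved around
 * jbd2_journal_load(), since replaying the journal may rewrite the
 * superblock and would otherwise clobber a previously recorded error.
 */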
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb->s_flags & MS_RDONLY) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;

	/*
	 * The superblock bh should be mapped, but it might not be if the
	 * device was hot-removed. Not much we can do but fail the I/O.
	 */
	if (!buffer_mapped(sbh))
		return error;

	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
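	/* part_stat_read() counts 512-byte sectors; ">> 1" converts to KiB. */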
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) &&
	    sb->s_flags & MS_RDONLY) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
4847 4848 4849 4850 4851 4852
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
4853
	}
4854 4855 4856 4857

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem into a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

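/*
 * Re-parse the mount options and apply any changes.  On failure every
 * option (and the superblock flags) is rolled back to the values saved
 * in old_opts, so a failed remount leaves the filesystem untouched.
 */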
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	ext4_clamp_want_extra_isize(sb);

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & MS_LAZYTIME)
		sb->s_flags |= MS_LAZYTIME;

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

#ifdef CONFIG_QUOTA
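/*
 * Clamp the blocks/inodes counts reported by statfs to the project quota
 * limits, so a directory tree under a project quota looks like a
 * filesystem of that size.
 */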
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif

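/*
 * Report filesystem statistics.  Unless minixdf is in effect, the on-disk
 * metadata overhead is subtracted from the block counts.
 */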
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}

/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write. Otherwise there are possible deadlocks:
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()                  write_dquot()
 *   dquot_initialize()                         down(dqio_mutex)
 *     down(dqio_mutex)                    jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

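/* Write a dquot to disk inside a correctly sized journal transaction */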
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

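/*
 * With journalled quota (either via the quota feature or the jqfmt mount
 * options) a dirtied dquot is written out immediately inside the running
 * transaction; otherwise it is only marked dirty for a later quota_sync().
 */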
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}
	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	return err;
}

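/*
 * Enable quota accounting for one quota type backed by a hidden system
 * inode (the quota feature) instead of a visible quota file.
 */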
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

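/*
 * Turn quotas off, flushing delalloc first so that all pending quota
 * updates reach the disk, and bumping the quota file timestamps so that
 * userspace sees fresh mtime/ctime values.
 */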
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode)
		goto out;

	/* Update modification times of quota files when userspace can
	 * start looking at them */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in transaction credits,
	 * a write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

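/* Find the next existing quota id at or after *qid via the loaded quota format */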
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops	*ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

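/*
 * Module init: set up the internal caches and sysfs, then register the
 * ext4 (and compatibility ext2/ext3) filesystem types.
 */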
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)