/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#include <linux/fscrypt_supp.h>
#else
#include <linux/fscrypt_notsupp.h>
#endif
#include <crypto/hash.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
	FAULT_KMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_IO,
	FAULT_CHECKPOINT,
	FAULT_MAX,
};

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern char *fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_FORCE_FG_GC		0x00004000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_ADAPTIVE		0x00020000
#define F2FS_MOUNT_LFS			0x00040000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000

#define clear_opt(sbi, option)	((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
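
/*
 * Usage sketch (illustrative only, not from the original source): mount
 * options live in a plain bitmask in sbi->mount_opt.opt, so enabling,
 * testing, and disabling an option are constant-time bit operations, e.g.:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		clear_opt(sbi, DISCARD);
 */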

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
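
/*
 * Worked example (added for clarity): ver_after() compares version numbers
 * via a signed subtraction, so it stays correct across u64 wraparound:
 *
 *	ver_after(2, 1)          -> true
 *	ver_after(0, ULLONG_MAX) -> true, 0 is "after" the wrapped maximum
 *	ver_after(1, 1)          -> false
 */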

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020

#define F2FS_HAS_FEATURE(sb, mask)					\
	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_SET_FEATURE(sb, mask)					\
	(F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sb, mask)					\
	(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
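
/*
 * Usage sketch (illustrative only): feature flags are tested against the
 * little-endian feature word in the raw superblock, e.g.:
 *
 *	if (F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT))
 *		... the volume was formatted with encryption support ...
 */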

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020

#define DEF_BATCHED_TRIM_SECTIONS	2048
#define BATCHED_TRIM_SEGMENTS(sbi)	\
		(GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
#define BATCHED_TRIM_BLOCKS(sbi)	\
		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
	__u64 trimmed;
};

/*
 * For CP/NAT/SIT/SSA readahead
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_POR,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the bitmap that indicates blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : (blk_num - 1))
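
/*
 * Worked example (added for clarity): pending discards are bucketed by
 * length, one list per block count up to MAX_PLIST_NUM:
 *
 *	plist_idx(1)    -> 0	(1-block discards)
 *	plist_idx(16)   -> 15
 *	plist_idx(512)  -> 511	(everything >= MAX_PLIST_NUM blocks
 *	plist_idx(4096) -> 511	 shares the last list)
 */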

enum {
	D_PREP,
	D_SUBMIT,
	D_DONE,
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	int error;			/* bio error */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* interval used when candidates exist */
	unsigned int max_interval;	/* interval used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discards are not I/O-aware */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	atomic_t issued_discard;		/* # of issued discards */
	atomic_t issing_discard;		/* # of issuing discards */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root root;			/* root of discard rb-tree */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
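
/*
 * Usage sketch (illustrative only): callers reserve journal slots before
 * adding entries, falling back to regular NAT/SIT blocks when the in-place
 * journal in the current summary is full, e.g.:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		... append one more nat entry to the journal ...
 *	else
 *		... flush journalled entries to NAT blocks instead ...
 */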

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS
#define F2FS_IOC_GETVERSION		FS_IOC_GETVERSION

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)

#define F2FS_IOC_SET_ENCRYPTION_POLICY	FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY	FS_IOC_GET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_PWSALT	FS_IOC_GET_ENCRYPTION_PWSALT

/*
 * Should be the same as XFS_IOC_GOINGDOWN.
 * Flags for the going-down operation used by FS_IOC_GOINGDOWN.
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
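
/*
 * Userspace sketch (illustrative only, error handling omitted): the
 * shutdown ioctl takes one of the F2FS_GOING_DOWN_* flags by pointer, e.g.:
 *
 *	__u32 flags = F2FS_GOING_DOWN_METAFLUSH;
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &flags);
 */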

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#define F2FS_IOC32_GETVERSION		FS_IOC32_GETVERSION
#endif

#define F2FS_IOC_FSGETXATTR		FS_IOC_FSGETXATTR
#define F2FS_IOC_FSSETXATTR		FS_IOC_FSSETXATTR

struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) * \
				(CUR_ADDRS_PER_INODE(inode) - \
				DEF_INLINE_RESERVED_SIZE - \
				F2FS_INLINE_XATTR_ADDRS))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode)	((NR_INLINE_DENTRY(inode) + \
					BITS_PER_BYTE - 1) / BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
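
/*
 * Worked example (added for clarity; assumes an inode without the extra
 * attribute area, taking CUR_ADDRS_PER_INODE(inode) == 923,
 * F2FS_INLINE_XATTR_ADDRS == 50, SIZE_OF_DIR_ENTRY == 11 and
 * F2FS_SLOT_LEN == 8 from the on-disk format headers):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 1 - 50)      = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / (19 * 8 + 1) = 182 entries
 *	INLINE_DENTRY_BITMAP_SIZE = (182 + 7) / 8           = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)  = 7 bytes
 */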

/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = &t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
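
/*
 * Layout note (added for clarity): the inline data area t is packed as
 * [dentry bitmap][reserved][dir entries][filename slots], so the three
 * pointers set above are simple offsets from t in that order.
 */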

/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* vector size for gang look-up from extent cache that consists of radix tree */
#define EXT_TREE_VEC_SIZE	64

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	unsigned int ofs;		/* start offset of the entry */
	unsigned int len;		/* length of the entry */
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;
	union {
		struct {
			unsigned int fofs;
			unsigned int len;
			u32 blk;
		};
		struct extent_info ei;	/* extent info */

	};
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root root;		/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
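
/*
 * Usage sketch (illustrative only): these wrappers keep per-file hints in
 * the on-disk i_advise byte, e.g. recovery marks a file whose parent ino
 * must be fixed up later:
 *
 *	file_lost_pino(inode);
 *	...
 *	if (file_wrong_pino(inode))
 *		... re-resolve and store the correct parent ino ...
 */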

#define DEF_DIR_LEVEL		0

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */
	struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
						struct discard_info *front)
{
	return back->lstart + back->len == front->lstart;
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
						struct discard_info *back)
{
	return __is_discard_mergeable(back, cur);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
						struct discard_info *front)
{
	return __is_discard_mergeable(cur, front);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
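
/*
 * Worked example (added for clarity): two extents merge only when they are
 * contiguous both in the file and on disk.  With
 * back = {fofs = 0, len = 4, blk = 100} and
 * front = {fofs = 4, len = 2, blk = 104},
 * __is_extent_mergeable(back, front) is true; if front->blk were 200, the
 * file offsets would still touch but the extents would not merge.
 */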

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct inode *inode,
			struct extent_tree *et, struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to readahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node ids */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE,
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t issing_flush;			/* # of issuing flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum iostat_type {
	APP_DIRECT_IO,			/* app direct IOs */
	APP_BUFFERED_IO,		/* app buffered IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before CoW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	enum iostat_type io_type;	/* io type */
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;			/* Total number of zones */
	u8 *blkz_type;				/* Array of zones type */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
};

enum {
	CP_TIME,
	REQ_TIME,
	MAX_TIME,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
						/* bio ordering for NODE/DATA */
	int write_io_size_bits;			/* Write IO size bits */
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int active_logs;			/* # of active logs */
	int dir_level;				/* directory level */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */

	u32 s_next_generation;			/* for NFS support */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req;			/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/* threshold for converting bg victims for fg */
	u64 fggc_threshold;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;		/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t aw_cnt;			/* # of atomic writes */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	int bg_gc;				/* background gc calls */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long write_iostat[NR_IO_TYPE];
	bool iostat_enable;

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	/* For fault injection */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;
#endif

#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type)				\
	printk("%sF2FS-fs : inject %s in %s of %pF\n",		\
		KERN_INFO, fault_name[type],			\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#endif

/*
 * For write statistics. Suppose sector size is 512 bytes, and the return
 * value is in kbytes. s is of struct f2fs_sb_info.
 */
#define BD_PART_WRITTEN(s)						 \
(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) -		 \
		(s)->sectors_written_start) >> 1)
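
/*
 * Worked example (added for clarity): part_stat_read() returns 512-byte
 * sectors written, so ">> 1" converts the delta to kilobytes; e.g. 2048
 * sectors written since mount -> BD_PART_WRITTEN == 1024 (KB, i.e. 1 MB).
 */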

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	sbi->last_time[type] = jiffies;
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}
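
/*
 * Usage sketch (illustrative only): callers stamp an event and later test
 * whether its configured interval has elapsed, e.g. when deciding whether
 * a periodic checkpoint is due:
 *
 *	f2fs_update_time(sbi, CP_TIME);
 *	...
 *	if (f2fs_time_over(sbi, CP_TIME))
 *		... trigger a checkpoint and stamp CP_TIME again ...
 */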

static inline bool is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
		return false;

	return f2fs_time_over(sbi, REQ_TIME);
}

/*
 * Inline functions
 */
static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	u32 retval;
	int err;

	shash->tfm = sbi->s_chksum_driver;
	shash->flags = 0;
	*ctx = F2FS_SUPER_MAGIC;

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	retval = *ctx;
	barrier_data(ctx);
	return retval;
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	desc.shash.flags = 0;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}
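
/*
 * Usage sketch (illustrative only): metadata blocks carry a crc32 seeded
 * with F2FS_SUPER_MAGIC, verified on read and regenerated on write, e.g.:
 *
 *	if (!f2fs_crc_valid(sbi, blk_crc, buf, buf_size))
 *		... reject the block as corrupted ...
 *
 *	blk_crc = f2fs_crc32(sbi, buf, buf_size);
 */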

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page->mapping);
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;

	set_sbi_flag(sbi, SBI_NEED_FSCK);

	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	kfree(NM_I(sbi)->nat_bits);
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
		return -EINVAL;
	if (unlikely(nid >= NM_I(sbi)->max_nid))
		return -EINVAL;
	return 0;
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(FAULT_BLOCK);
		release = *count;
		goto enospc;
	}
#endif
	/*
	 * let's increase this prior to the actual block count change in order
	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks;
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count = avail_user_block_count;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (release)
		dquot_release_reservation_block(inode, release);
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	f2fs_bug_on(sbi, inode->i_blocks < sectors);
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
		count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
		return;

	set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

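/*
 * Worked example for get_blocktype_secs() below (illustrative numbers
 * only): with 512 blocks per segment and 1 segment per section,
 * 1000 dirty pages gives segs = (1000 + 511) >> 9 = 2, i.e. 2 sections.
 */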
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}

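/*
 * Layout note for __bitmap_ptr(): with a checkpoint payload
 * (cp_payload > 0) the NAT bitmap sits at sit_nat_version_bitmap and the
 * SIT bitmap starts one block past the checkpoint header; without a
 * payload both bitmaps share the flexible array, SIT first and NAT
 * following after sit_ver_bitmap_bytesize bytes.
 */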
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset;

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return &ckpt->sit_nat_version_bitmap + offset;
	}
}

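/*
 * f2fs keeps two checkpoint packs, #1 and #2, in consecutive segments
 * starting at cp_blkaddr; cur_cp_pack selects the live one, and
 * __set_cp_next_pack() flips to the other before the next checkpoint is
 * written.
 */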
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

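/*
 * Node accounting: a new node block consumes one valid block and one
 * valid node.  Quota is reserved only when a non-inode node block is
 * allocated on behalf of an inode, which is why "quota" below is
 * inode && !is_inode.
 */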
static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t valid_block_count;
	unsigned int valid_node_count;
	bool quota = inode && !is_inode;

	if (quota) {
		int ret = dquot_reserve_block(inode, 1);
		if (ret)
			return ret;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + 1;
	if (unlikely(valid_block_count + sbi->reserved_blocks >
						sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (quota)
		dquot_release_reservation_block(inode, 1);
	return -ENOSPC;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
	f2fs_bug_on(sbi, !is_inode && !inode->i_blocks);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;

	spin_unlock(&sbi->stat_lock);

	if (!is_inode)
		f2fs_i_blocks_write(inode, 1, false, true);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

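/*
 * With CONFIG_F2FS_FAULT_INJECTION, f2fs_grab_cache_page() only injects
 * a failure when the page is not already cached: an existing page is
 * always returned, so the fault path exercises the allocation case only.
 */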
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct page *page = find_lock_page(mapping, index);

	if (page)
		return page;

	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
		f2fs_show_injection_info(FAULT_PAGE_ALLOC);
		return NULL;
	}
#endif
	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}

static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

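/*
 * The two allocators below retry with __GFP_NOFAIL rather than return
 * NULL: a failed first attempt is repeated with the no-fail flag, so
 * callers do not need an error path for allocation failure.
 */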
static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}

static inline struct bio *f2fs_bio_alloc(int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	if (!bio)
		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
	return bio;
}

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

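/*
 * datablock_addr() tolerates a NULL inode (the GC path reads node pages
 * without resolving the owner); in that case the extra-attribute offset
 * is taken from the raw on-disk inode rather than the in-memory inode.
 */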
static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t datablock_addr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	/* from GC path only */
	if (!inode) {
		if (is_inode)
			base = offset_in_addr(&raw_node->i);
	} else if (f2fs_has_extra_attr(inode) && is_inode) {
		base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

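/*
 * The f2fs bit helpers below address bits MSB-first within each byte:
 * mask = 1 << (7 - (nr & 7)).  For example (illustrative): nr = 0 maps
 * to mask 0x80 in byte 0, and nr = 9 maps to mask 0x40 in byte 1.
 */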
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)
#define F2FS_FL_INHERITED	(FS_PROJINHERIT_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate atomic commit is in progress */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
};

static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
		if (set)
			return;
		/* fall through: clearing an inline flag dirties the inode */
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	if (!test_bit(flag, &F2FS_I(inode)->flags))
		set_bit(flag, &F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, &F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	if (test_bit(flag, &F2FS_I(inode)->flags))
		clear_bit(flag, &F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 must be paired with a prior dquot_reserve_block() */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, &fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, &fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, &fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, &fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, &fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, &fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS;
	return CUR_ADDRS_PER_INODE(inode);
}

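/*
 * Inline xattrs occupy the last F2FS_INLINE_XATTR_ADDRS slots of the
 * inode's i_addr array, which is why addrs_per_inode() above subtracts
 * them and inline_xattr_addr() below indexes from the tail.
 */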
static inline void *inline_xattr_addr(struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					F2FS_INLINE_XATTR_ADDRS]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return F2FS_INLINE_XATTR_ADDRS << 2;
	else
		return 0;
}

static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
{
	if (!f2fs_has_inline_dentry(dir))
		kunmap(page);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

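/*
 * f2fs_skip_inode_update() decides whether an fsync can skip writing the
 * inode: for datasync it only checks the dirty-meta list; otherwise the
 * inode must be marked FI_AUTO_RECOVER, must not pin i_size, its size
 * must be smaller than one page, and last_disk_size must already match
 * i_size (a reading of the checks below).
 */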
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & PAGE_MASK)
		return false;

	down_read(&F2FS_I(inode)->i_sem);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	up_read(&F2FS_I(inode)->i_sem);

	return ret;
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const struct qstr *str)
{
	if (str->len == 1 && str->name[0] == '.')
		return true;

	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
		return true;

	return false;
}

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT))
		return false;

	return S_ISREG(inode->i_mode);
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(FAULT_KMALLOC);
		return NULL;
	}
#endif
	return kmalloc(size, flags);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

#define get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))	\

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
		((offsetof(typeof(*f2fs_inode), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize))	\

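/*
 * F2FS_FITS_IN_INODE() checks whether an extra attribute field is
 * covered by this inode's i_extra_isize.  Hypothetical usage sketch:
 *
 *	if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		...the on-disk inode carries a project id...
 */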
static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++)
		sbi->write_iostat[i] = 0;
	spin_unlock(&sbi->iostat_lock);
}

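/*
 * f2fs_update_iostat() below derives APP_BUFFERED_IO instead of counting
 * it directly: buffered write bytes = total app writes - direct writes,
 * recomputed under iostat_lock on every update.
 */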
static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;
	spin_lock(&sbi->iostat_lock);
	sbi->write_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->write_iostat[APP_BUFFERED_IO] =
			sbi->write_iostat[APP_WRITE_IO] -
			sbi->write_iostat[APP_DIRECT_IO];
	spin_unlock(&sbi->iostat_lock);
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void truncate_data_blocks(struct dnode_of_data *dn);
int truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags);
int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
int truncate_data_blocks_range(struct dnode_of_data *dn, int count);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
int update_inode(struct inode *inode, struct page *node_page);
int update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
void set_de_type(struct f2fs_dir_entry *de, umode_t mode);
unsigned char get_de_type(struct f2fs_dir_entry *de);
struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
			f2fs_hash_t namehash, int *max_slots,
			struct f2fs_dentry_ptr *d);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct qstr *new_name,
			const struct qstr *orig_name, struct page *dpage);
void update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			struct fscrypt_name *fname, struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct qstr *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
			const struct qstr *orig_name,
			struct inode *inode, nid_t ino, umode_t mode);
int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
void f2fs_enable_quota_files(struct f2fs_sb_info *sbi);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
extern __printf(3, 4)
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
int sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
				struct fscrypt_name *fname);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

bool available_free_memory(struct f2fs_sb_info *sbi, int type);
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int truncate_inode_blocks(struct inode *inode, pgoff_t from);
int truncate_xattr_node(struct inode *inode);
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
int remove_inode_page(struct inode *inode);
struct page *new_inode_page(struct inode *inode);
struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *get_node_page_ra(struct page *parent, int start);
void move_node_page(struct page *node_page, int gc_type);
int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic);
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
void recover_inline_xattr(struct inode *inode, struct page *page);
int recover_xattr_data(struct inode *inode, struct page *page,
			block_t blkaddr);
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int build_node_manager(struct f2fs_sb_info *sbi);
void destroy_node_manager(struct f2fs_sb_info *sbi);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
bool need_SSR(struct f2fs_sb_info *sbi);
void register_inmem_page(struct inode *inode, struct page *page);
void drop_inmem_pages_all(struct f2fs_sb_info *sbi);
void drop_inmem_pages(struct inode *inode);
void drop_inmem_page(struct inode *inode, struct page *page);
int commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
						unsigned int granularity);
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
void stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void release_discard_addrs(struct f2fs_sb_info *sbi);
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
int rewrite_data_page(struct f2fs_io_info *fio);
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio, bool add_list);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered);
void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr);
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int build_segment_manager(struct f2fs_sb_info *sbi);
void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type);
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int acquire_orphan_inode(struct f2fs_sb_info *sbi);
void release_orphan_inode(struct f2fs_sb_info *sbi);
void add_orphan_inode(struct inode *inode);
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int recover_orphan_inodes(struct f2fs_sb_info *sbi);
int get_valid_checkpoint(struct f2fs_sb_info *sbi);
void update_dirty_page(struct inode *inode, struct page *page);
void remove_dirty_inode(struct inode *inode);
int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *find_data_page(struct inode *inode, pgoff_t index);
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int do_write_data_page(struct f2fs_io_info *fio);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
void f2fs_set_page_dirty_nobuffers(struct page *page);
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *sbi);
void stop_gc_thread(struct f2fs_sb_info *sbi);
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void build_gc_manager(struct f2fs_sb_info *sbi);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_inc_atomic_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->aw_cnt))
#define stat_dec_atomic_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->aw_cnt))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sb)				do { } while (0)
#define stat_inc_rbtree_node_hit(sb)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_atomic_write(inode)			do { } while (0)
#define stat_dec_atomic_write(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_create_root_stats(void) { return 0; }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void read_inline_data(struct page *page, struct page *ipage);
void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
bool recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct fscrypt_name *fname, struct page **res_page);
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
			const struct qstr *orig_name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs);
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force);
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init create_extent_cache(void);
void destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/*
 * crypto support
 */
static inline bool f2fs_encrypted_inode(struct inode *inode)
{
	return file_is_encrypt(inode);
}

static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	file_set_encrypt(inode);
#endif
}

static inline bool f2fs_bio_encrypted(struct bio *bio)
{
	return bio->bi_private != NULL;
}

static inline int f2fs_sb_has_crypto(struct super_block *sb)
{
	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
}

static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
{
	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
}

static inline int f2fs_sb_has_extra_attr(struct super_block *sb)
{
	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR);
}

static inline int f2fs_sb_has_project_quota(struct super_block *sb)
{
	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA);
}

static inline int f2fs_sb_has_inode_chksum(struct super_block *sb)
{
	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM);
}

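/*
 * For zoned block devices, get_blkz_type() below maps a block address to
 * the zone type recorded for the owning device; multi-device setups scan
 * the FDEV() table for the matching bdev.
 */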
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
			struct block_device *bdev, block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).bdev == bdev)
			return FDEV(i).blkz_type[zno];
	return -EINVAL;
}
#endif

static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);

	return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
}

static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
{
	clear_opt(sbi, ADAPTIVE);
	clear_opt(sbi, LFS);

	switch (mt) {
	case F2FS_MOUNT_ADAPTIVE:
		set_opt(sbi, ADAPTIVE);
		break;
	case F2FS_MOUNT_LFS:
		set_opt(sbi, LFS);
		break;
	}
}

static inline bool f2fs_may_encrypt(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	umode_t mode = inode->i_mode;

	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#else
	return 0;
#endif
}

#endif