Commit 38aa0889 authored by Jaegeuk Kim

f2fs: align direct_io'ed data to section

This patch aligns the start block address of a file to f2fs's section size for
direct IO.

Some flash devices manage a page larger than 4KB as their write unit, and if
direct_io'ed data is written without being aligned to that unit, performance
can degrade due to partial page copies.

Thus, since an f2fs section is well aligned to FTL units, we can align the
block address to the section size so that f2fs avoids this misalignment.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 41ef94b3
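
For orientation before the diff, here is a minimal user-space sketch of the
allocation policy this patch introduces. The names (toy_curseg,
toy_alloc_block) and the segment geometry are assumptions invented for the
example, not f2fs code; only the policy itself, i.e. opening a fresh segment
when a direct IO allocation finds the current one partially filled, mirrors
the change below.

/*
 * Toy model (assumed names/geometry): a direct-IO allocation that arrives
 * while the current segment is partially used jumps to a fresh segment,
 * so the data starts on a segment/section boundary.
 */
#include <stdbool.h>
#include <stdio.h>

#define BLKS_PER_SEG	512	/* example: 2MB segment of 4KB blocks */

struct toy_curseg {
	unsigned long long segno;	/* current segment number */
	unsigned int next_blkoff;	/* next free block within it */
};

static unsigned long long toy_alloc_block(struct toy_curseg *cs, bool direct_io)
{
	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && cs->next_blkoff) {
		cs->segno++;		/* open a fresh segment */
		cs->next_blkoff = 0;
	}
	return cs->segno * BLKS_PER_SEG + cs->next_blkoff++;
}

int main(void)
{
	struct toy_curseg cs = { .segno = 7, .next_blkoff = 100 };

	printf("buffered write -> block %llu\n", toy_alloc_block(&cs, false));
	printf("direct IO      -> block %llu\n", toy_alloc_block(&cs, true));
	return 0;
}
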
@@ -564,6 +564,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
 	struct f2fs_summary sum;
 	struct node_info ni;
+	int seg = CURSEG_WARM_DATA;
 	pgoff_t fofs;
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
@@ -574,8 +575,10 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 
-	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum,
-							CURSEG_WARM_DATA);
+	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
+		seg = CURSEG_DIRECT_IO;
+
+	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
 
 	/* direct IO doesn't use extent cache to maximize the performance */
 	__set_data_blkaddr(dn);
@@ -406,7 +406,8 @@ enum {
 	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 	CURSEG_COLD_NODE,	/* indirect node blocks */
-	NO_CHECK_TYPE
+	NO_CHECK_TYPE,
+	CURSEG_DIRECT_IO,	/* to use for the direct IO path */
 };
 
 struct flush_cmd {
@@ -1024,18 +1024,22 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 	stat_inc_seg_type(sbi, curseg);
 }
 
+static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	unsigned int old_segno;
+
+	old_segno = curseg->segno;
+	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+	locate_dirty_segment(sbi, old_segno);
+}
+
 void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
-	struct curseg_info *curseg;
-	unsigned int old_curseg;
 	int i;
 
-	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
-		curseg = CURSEG_I(sbi, i);
-		old_curseg = curseg->segno;
-		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-		locate_dirty_segment(sbi, old_curseg);
-	}
+	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+		__allocate_new_segments(sbi, i);
 }
 
 static const struct segment_allocation default_salloc_ops = {
@@ -1148,11 +1152,18 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	struct curseg_info *curseg;
+	bool direct_io = (type == CURSEG_DIRECT_IO);
+
+	type = direct_io ? CURSEG_WARM_DATA : type;
 
 	curseg = CURSEG_I(sbi, type);
 
 	mutex_lock(&curseg->curseg_mutex);
 
+	/* direct_io'ed data is aligned to the segment for better performance */
+	if (direct_io && curseg->next_blkoff)
+		__allocate_new_segments(sbi, type);
+
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
 	/*