Commit 078b8b90 authored by David Sterba

btrfs: merge ordered work callbacks in btrfs_work into one

There are two ordered callbacks defined in btrfs_work, but only two users
actually make use of both of them; everywhere else the pointers are NULL.
We can get rid of the freeing callback by making it a special case of the
normal ordered work callback. This reduces the size of btrfs_work by
8 bytes, final layout:

struct btrfs_work {
        btrfs_func_t               func;                 /*     0     8 */
        btrfs_ordered_func_t       ordered_func;         /*     8     8 */
        struct work_struct         normal_work;          /*    16    32 */
        struct list_head           ordered_list;         /*    48    16 */
        /* --- cacheline 1 boundary (64 bytes) --- */
        struct btrfs_workqueue *   wq;                   /*    64     8 */
        long unsigned int          flags;                /*    72     8 */

        /* size: 80, cachelines: 2, members: 6 */
        /* last cacheline: 16 bytes */
};

This in turn reduces the size of other structures (on a release config):

- async_chunk			 160 ->  152
- async_submit_bio		 152 ->  144
- btrfs_async_delayed_work	 104 ->   96
- btrfs_caching_control		 176 ->  168
- btrfs_delalloc_work		 144 ->  136
- btrfs_fs_info			3608 -> 3600
- btrfs_ordered_extent		 440 ->  424
- btrfs_writepage_fixup		 104 ->   96
Signed-off-by: David Sterba <dsterba@suse.com>
parent e9b9b911
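
For illustration, a minimal standalone C sketch of the merged-callback pattern this patch applies. All names below (demo_work, run_work, demo_done, and so on) are hypothetical and are not part of the btrfs code; the runner calls the single ordered callback once with do_free == false for the ordered phase and once with do_free == true so the callback can free the item, mirroring run_ordered_work() after the change.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_work;

typedef void (*demo_func_t)(struct demo_work *work);
typedef void (*demo_ordered_func_t)(struct demo_work *work, bool do_free);

/* Scaled-down stand-in for struct btrfs_work after the patch. */
struct demo_work {
	demo_func_t func;                 /* normal work */
	demo_ordered_func_t ordered_func; /* ordered phase, also frees when do_free */
};

/* Stand-in for btrfs_work_helper()/run_ordered_work(): run the normal
 * work, then the ordered phase, then let the callback free the item. */
static void run_work(struct demo_work *work)
{
	work->func(work);
	if (work->ordered_func) {
		work->ordered_func(work, false); /* ordered portion */
		work->ordered_func(work, true);  /* frees; do not touch work after this */
	}
}

struct demo_item {
	struct demo_work work; /* must stay first so the casts below are valid */
	int payload;
};

static void demo_start(struct demo_work *work)
{
	struct demo_item *item = (struct demo_item *)work;

	printf("start: payload=%d\n", item->payload);
}

/* One callback covers what used to be two: the ordered step and the free. */
static void demo_done(struct demo_work *work, bool do_free)
{
	struct demo_item *item = (struct demo_item *)work;

	if (do_free) {
		free(item);
		return;
	}
	printf("done: payload=%d\n", item->payload);
}

int main(void)
{
	struct demo_item *item = calloc(1, sizeof(*item));

	if (!item)
		return 1;
	item->payload = 42;
	item->work.func = demo_start;
	item->work.ordered_func = demo_done;
	run_work(&item->work);
	return 0;
}
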
@@ -243,7 +243,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
 			break;
 		trace_btrfs_ordered_sched(work);
 		spin_unlock_irqrestore(lock, flags);
-		work->ordered_func(work);
+		work->ordered_func(work, false);

 		/* now take the lock again and drop our item from the list */
 		spin_lock_irqsave(lock, flags);
@@ -278,7 +278,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
 			 * We don't want to call the ordered free functions with
 			 * the lock held.
 			 */
-			work->ordered_free(work);
+			work->ordered_func(work, true);
 			/* NB: work must not be dereferenced past this point. */
 			trace_btrfs_all_work_done(wq->fs_info, work);
 		}
@@ -286,7 +286,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
 	spin_unlock_irqrestore(lock, flags);

 	if (free_self) {
-		self->ordered_free(self);
+		self->ordered_func(self, true);
 		/* NB: self must not be dereferenced past this point. */
 		trace_btrfs_all_work_done(wq->fs_info, self);
 	}
@@ -301,7 +301,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
 	/*
 	 * We should not touch things inside work in the following cases:
-	 * 1) after work->func() if it has no ordered_free
+	 * 1) after work->func() if it has no ordered_func(..., true) to free
 	 *    Since the struct is freed in work->func().
 	 * 2) after setting WORK_DONE_BIT
 	 *    The work may be freed in other threads almost instantly.
@@ -330,11 +330,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
 }

 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
-		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
+		     btrfs_ordered_func_t ordered_func)
 {
 	work->func = func;
 	work->ordered_func = ordered_func;
-	work->ordered_free = ordered_free;
 	INIT_WORK(&work->normal_work, btrfs_work_helper);
 	INIT_LIST_HEAD(&work->ordered_list);
 	work->flags = 0;
...
@@ -13,11 +13,11 @@ struct btrfs_fs_info;
 struct btrfs_workqueue;
 struct btrfs_work;
 typedef void (*btrfs_func_t)(struct btrfs_work *arg);
+typedef void (*btrfs_ordered_func_t)(struct btrfs_work *arg, bool);

 struct btrfs_work {
 	btrfs_func_t func;
-	btrfs_func_t ordered_func;
-	btrfs_func_t ordered_free;
+	btrfs_ordered_func_t ordered_func;

 	/* Don't touch things below */
 	struct work_struct normal_work;
@@ -35,7 +35,7 @@ struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
 		struct btrfs_fs_info *fs_info, const char *name,
 		unsigned int flags);
 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
-		     btrfs_func_t ordered_func, btrfs_func_t ordered_free);
+		     btrfs_ordered_func_t ordered_func);
 void btrfs_queue_work(struct btrfs_workqueue *wq,
 		      struct btrfs_work *work);
 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
...
@@ -574,13 +574,20 @@ static void run_one_async_start(struct btrfs_work *work)
  *
  * At IO completion time the csums attached on the ordered extent record are
  * inserted into the tree.
+ *
+ * If called with @do_free == true, then it will free the work struct.
  */
-static void run_one_async_done(struct btrfs_work *work)
+static void run_one_async_done(struct btrfs_work *work, bool do_free)
 {
 	struct async_submit_bio *async =
 		container_of(work, struct async_submit_bio, work);
 	struct bio *bio = &async->bbio->bio;

+	if (do_free) {
+		kfree(container_of(work, struct async_submit_bio, work));
+		return;
+	}
+
 	/* If an error occurred we just want to clean up the bio and move on. */
 	if (bio->bi_status) {
 		btrfs_orig_bbio_end_io(async->bbio);
@@ -596,11 +603,6 @@ static void run_one_async_done(struct btrfs_work *work)
 	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
 }

-static void run_one_async_free(struct btrfs_work *work)
-{
-	kfree(container_of(work, struct async_submit_bio, work));
-}
-
 static bool should_async_write(struct btrfs_bio *bbio)
 {
 	/* Submit synchronously if the checksum implementation is fast. */
@@ -642,8 +644,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
 	async->smap = *smap;
 	async->mirror_num = mirror_num;

-	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
-			run_one_async_free);
+	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
 	btrfs_queue_work(fs_info->workers, &async->work);
 	return true;
 }
...
@@ -935,7 +935,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 	caching_ctl->block_group = cache;
 	refcount_set(&caching_ctl->count, 2);
 	atomic_set(&caching_ctl->progress, 0);
-	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+	btrfs_init_work(&caching_ctl->work, caching_thread, NULL);

 	spin_lock(&cache->lock);
 	if (cache->cached != BTRFS_CACHE_NO) {
...
@@ -1379,8 +1379,7 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 		return -ENOMEM;

 	async_work->delayed_root = delayed_root;
-	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
-			NULL);
+	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
 	async_work->nr = nr;

 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
...
@@ -1566,8 +1566,11 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
  * Phase two of compressed writeback. This is the ordered portion of the code,
  * which only gets called in the order the work was queued. We walk all the
  * async extents created by compress_file_range and send them down to the disk.
+ *
+ * If called with @do_free == true then it'll try to finish the work and free
+ * the work struct eventually.
  */
-static noinline void submit_compressed_extents(struct btrfs_work *work)
+static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
 {
 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
 						       work);
@@ -1576,6 +1579,21 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
 	unsigned long nr_pages;
 	u64 alloc_hint = 0;

+	if (do_free) {
+		struct async_chunk *async_chunk;
+		struct async_cow *async_cow;
+
+		async_chunk = container_of(work, struct async_chunk, work);
+		btrfs_add_delayed_iput(async_chunk->inode);
+		if (async_chunk->blkcg_css)
+			css_put(async_chunk->blkcg_css);
+
+		async_cow = async_chunk->async_cow;
+		if (atomic_dec_and_test(&async_cow->num_chunks))
+			kvfree(async_cow);
+
+		return;
+	}
+
 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
 		PAGE_SHIFT;
@@ -1592,21 +1610,6 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
 	cond_wake_up_nomb(&fs_info->async_submit_wait);
 }

-static noinline void async_cow_free(struct btrfs_work *work)
-{
-	struct async_chunk *async_chunk;
-	struct async_cow *async_cow;
-
-	async_chunk = container_of(work, struct async_chunk, work);
-	btrfs_add_delayed_iput(async_chunk->inode);
-	if (async_chunk->blkcg_css)
-		css_put(async_chunk->blkcg_css);
-
-	async_cow = async_chunk->async_cow;
-	if (atomic_dec_and_test(&async_cow->num_chunks))
-		kvfree(async_cow);
-}
-
 static bool run_delalloc_compressed(struct btrfs_inode *inode,
 				    struct page *locked_page, u64 start,
 				    u64 end, struct writeback_control *wbc)
@@ -1684,7 +1687,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
 		}

 		btrfs_init_work(&async_chunk[i].work, compress_file_range,
-				submit_compressed_extents, async_cow_free);
+				submit_compressed_extents);

 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
@@ -2848,7 +2851,7 @@ int btrfs_writepage_cow_fixup(struct page *page)
 	ihold(inode);
 	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
 	get_page(page);
-	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
+	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
 	fixup->page = page;
 	fixup->inode = BTRFS_I(inode);
 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
@@ -9215,7 +9218,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
 	init_completion(&work->completion);
 	INIT_LIST_HEAD(&work->list);
 	work->inode = inode;
-	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
+	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);

 	return work;
 }
...
@@ -365,7 +365,7 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
 	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
 		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

-	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
+	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
 	btrfs_queue_work(wq, &ordered->work);
 }
@@ -712,7 +712,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 		spin_unlock(&root->ordered_extent_lock);

 		btrfs_init_work(&ordered->flush_work,
-				btrfs_run_ordered_extent_work, NULL, NULL);
+				btrfs_run_ordered_extent_work, NULL);
 		list_add_tail(&ordered->work_list, &works);
 		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
...
@@ -3583,7 +3583,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 	mutex_unlock(&fs_info->qgroup_rescan_lock);

 	btrfs_init_work(&fs_info->qgroup_rescan_work,
-			btrfs_qgroup_rescan_worker, NULL, NULL);
+			btrfs_qgroup_rescan_worker, NULL);
 	return 0;
 }
...
@@ -1561,7 +1561,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
 		__field( const void *, wq )
 		__field( const void *, func )
 		__field( const void *, ordered_func )
-		__field( const void *, ordered_free )
 		__field( const void *, normal_work )
 	),
@@ -1570,14 +1569,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
 		__entry->wq = work->wq;
 		__entry->func = work->func;
 		__entry->ordered_func = work->ordered_func;
-		__entry->ordered_free = work->ordered_free;
 		__entry->normal_work = &work->normal_work;
 	),

-	TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
-		  "ordered_free=%p",
+	TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
 		  __entry->work, __entry->normal_work, __entry->wq,
-		  __entry->func, __entry->ordered_func, __entry->ordered_free)
+		  __entry->func, __entry->ordered_func)
 );

 /*
...