Commit 9e91c572 authored by Christoph Hellwig, committed by Darrick J. Wong

iomap: lift common tracing code from xfs to iomap

Lift the xfs code for tracing address space operations to the iomap
layer.
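
With the tracepoints in the iomap layer, a converting filesystem no longer
needs thin wrappers whose only job is tracing (as xfs_vm_releasepage() and
xfs_vm_invalidatepage() were); it can wire its address_space_operations
straight to the iomap helpers. A minimal sketch, assuming a hypothetical
example_fs with its own example_iomap_ops:

	#include <linux/fs.h>
	#include <linux/iomap.h>

	/* hypothetical: provided elsewhere by example_fs */
	extern const struct iomap_ops example_iomap_ops;

	static int example_readpage(struct file *unused, struct page *page)
	{
		/* trace_iomap_readpage() fires inside iomap_readpage() */
		return iomap_readpage(page, &example_iomap_ops);
	}

	static const struct address_space_operations example_aops = {
		.readpage	= example_readpage,
		.releasepage	= iomap_releasepage,	/* traced in iomap */
		.invalidatepage	= iomap_invalidatepage,	/* traced in iomap */
	};
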
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 009d8d84

fs/iomap/Makefile
@@ -3,13 +3,15 @@
 # Copyright (c) 2019 Oracle.
 # All Rights Reserved.
 #
-obj-$(CONFIG_FS_IOMAP)		+= iomap.o
+ccflags-y += -I $(srctree)/$(src)		# needed for trace events
 
-iomap-y				+= \
+obj-$(CONFIG_FS_IOMAP)		+= iomap.o
+
+iomap-y				+= trace.o \
 					apply.o \
 					buffered-io.o \
 					direct-io.o \
 					fiemap.o \
 					seek.o
 
 iomap-$(CONFIG_SWAP)		+= swapfile.o

fs/iomap/buffered-io.c
@@ -16,6 +16,7 @@
 #include <linux/bio.h>
 #include <linux/sched/signal.h>
 #include <linux/migrate.h>
+#include "trace.h"
 
 #include "../internal.h"
@@ -301,6 +302,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 	unsigned poff;
 	loff_t ret;
 
+	trace_iomap_readpage(page->mapping->host, 1);
+
 	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
 		ret = iomap_apply(inode, page_offset(page) + poff,
 				PAGE_SIZE - poff, 0, ops, &ctx,
@@ -397,6 +400,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
 	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
 	loff_t length = last - pos + PAGE_SIZE, ret = 0;
 
+	trace_iomap_readpages(mapping->host, nr_pages);
+
 	while (length > 0) {
 		ret = iomap_apply(mapping->host, pos, length, 0, ops,
 				&ctx, iomap_readpages_actor);
@@ -463,6 +468,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 int
 iomap_releasepage(struct page *page, gfp_t gfp_mask)
 {
+	trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+
 	/*
 	 * mm accommodates an old ext3 case where clean pages might not have had
 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
@@ -478,6 +485,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
 void
 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 {
+	trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+
 	/*
 	 * If we are invalidating the entire page, clear the dirty state from it
 	 * and release it to avoid unnecessary buildup of the LRU.

fs/iomap/trace.c (new file)
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Christoph Hellwig
*/
#include <linux/iomap.h>
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "trace.h"

fs/iomap/trace.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2009-2019 Christoph Hellwig
 *
 * NOTE: none of these tracepoints shall be considered a stable kernel ABI
 * as they can change at any time.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iomap

#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _IOMAP_TRACE_H

#include <linux/tracepoint.h>

struct inode;

DECLARE_EVENT_CLASS(iomap_readpage_class,
	TP_PROTO(struct inode *inode, int nr_pages),
	TP_ARGS(inode, nr_pages),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, ino)
		__field(int, nr_pages)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->nr_pages = nr_pages;
	),
	TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->nr_pages)
)

#define DEFINE_READPAGE_EVENT(name)		\
DEFINE_EVENT(iomap_readpage_class, name,	\
	TP_PROTO(struct inode *inode, int nr_pages), \
	TP_ARGS(inode, nr_pages))
DEFINE_READPAGE_EVENT(iomap_readpage);
DEFINE_READPAGE_EVENT(iomap_readpages);

DECLARE_EVENT_CLASS(iomap_page_class,
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
		 unsigned int len),
	TP_ARGS(inode, page, off, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, ino)
		__field(pgoff_t, pgoff)
		__field(loff_t, size)
		__field(unsigned long, offset)
		__field(unsigned int, length)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->pgoff = page_offset(page);
		__entry->size = i_size_read(inode);
		__entry->offset = off;
		__entry->length = len;
	),
	TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
		  "length %x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->pgoff,
		  __entry->size,
		  __entry->offset,
		  __entry->length)
)

#define DEFINE_PAGE_EVENT(name)		\
DEFINE_EVENT(iomap_page_class, name,	\
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
		 unsigned int len), \
	TP_ARGS(inode, page, off, len))
DEFINE_PAGE_EVENT(iomap_releasepage);
DEFINE_PAGE_EVENT(iomap_invalidatepage);

#endif /* _IOMAP_TRACE_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
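
The shared event classes keep this cheap to extend: a further page-granularity
tracepoint would only need one more DEFINE_PAGE_EVENT() line alongside the two
above, plus a call at the new site. A hypothetical sketch (iomap_writepage is
an assumption, not added by this patch):

	/* reuses iomap_page_class; generates trace_iomap_writepage() */
	DEFINE_PAGE_EVENT(iomap_writepage);

	/*
	 * A writeback helper would then call, for example:
	 *	trace_iomap_writepage(inode, page, 0, PAGE_SIZE);
	 */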

fs/xfs/xfs_aops.c
@@ -823,16 +823,6 @@ xfs_add_to_ioend(
 	wbc_account_cgroup_owner(wbc, page, len);
 }
 
-STATIC void
-xfs_vm_invalidatepage(
-	struct page		*page,
-	unsigned int		offset,
-	unsigned int		length)
-{
-	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
-	iomap_invalidatepage(page, offset, length);
-}
-
 /*
  * If the page has delalloc blocks on it, we need to punch them out before we
  * invalidate the page. If we don't, we leave a stale delalloc mapping on the
@@ -867,7 +857,7 @@ xfs_aops_discard_page(
 	if (error && !XFS_FORCED_SHUTDOWN(mp))
 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
+	iomap_invalidatepage(page, 0, PAGE_SIZE);
 }
 
 /*
@@ -1147,15 +1137,6 @@ xfs_dax_writepages(
 			xfs_find_bdev_for_inode(mapping->host), wbc);
 }
 
-STATIC int
-xfs_vm_releasepage(
-	struct page		*page,
-	gfp_t			gfp_mask)
-{
-	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
-	return iomap_releasepage(page, gfp_mask);
-}
-
 STATIC sector_t
 xfs_vm_bmap(
 	struct address_space	*mapping,
@@ -1184,7 +1165,6 @@ xfs_vm_readpage(
 	struct file		*unused,
 	struct page		*page)
 {
-	trace_xfs_vm_readpage(page->mapping->host, 1);
 	return iomap_readpage(page, &xfs_iomap_ops);
 }
 
@@ -1195,7 +1175,6 @@ xfs_vm_readpages(
 	struct list_head	*pages,
 	unsigned		nr_pages)
 {
-	trace_xfs_vm_readpages(mapping->host, nr_pages);
 	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
 }
 
@@ -1215,8 +1194,8 @@ const struct address_space_operations xfs_address_space_operations = {
 	.writepage		= xfs_vm_writepage,
 	.writepages		= xfs_vm_writepages,
 	.set_page_dirty		= iomap_set_page_dirty,
-	.releasepage		= xfs_vm_releasepage,
-	.invalidatepage		= xfs_vm_invalidatepage,
+	.releasepage		= iomap_releasepage,
+	.invalidatepage		= iomap_invalidatepage,
 	.bmap			= xfs_vm_bmap,
 	.direct_IO		= noop_direct_IO,
 	.migratepage		= iomap_migrate_page,

fs/xfs/xfs_trace.h
@@ -1197,32 +1197,6 @@ DEFINE_PAGE_EVENT(xfs_writepage);
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
 
-DECLARE_EVENT_CLASS(xfs_readpage_class,
-	TP_PROTO(struct inode *inode, int nr_pages),
-	TP_ARGS(inode, nr_pages),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(xfs_ino_t, ino)
-		__field(int, nr_pages)
-	),
-	TP_fast_assign(
-		__entry->dev = inode->i_sb->s_dev;
-		__entry->ino = inode->i_ino;
-		__entry->nr_pages = nr_pages;
-	),
-	TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->ino,
-		  __entry->nr_pages)
-)
-
-#define DEFINE_READPAGE_EVENT(name)		\
-DEFINE_EVENT(xfs_readpage_class, name,	\
-	TP_PROTO(struct inode *inode, int nr_pages), \
-	TP_ARGS(inode, nr_pages))
-DEFINE_READPAGE_EVENT(xfs_vm_readpage);
-DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-
 DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
 		 int whichfork, struct xfs_bmbt_irec *irec),