Commit 185f0c70 authored by David Howells

afs: Wrap page->private manipulations in inline functions

The afs filesystem uses page->private to store the dirty range within a
page such that in the event of a conflicting 3rd-party write to the server,
we write back just the bits that got changed locally.

However, there are a couple of problems with this:

 (1) I need a bit to note if the page might be mapped so that partial
     invalidation doesn't shrink the range.

 (2) There aren't necessarily sufficient bits to store the entire range of
     data altered (say it's a 32-bit system with 64KiB pages or transparent
     huge pages are in use).

So wrap the accesses in inline functions so that future commits can change
how this works.

Also move them out of the tracing header into the in-directory header.
There's not really any need for them to be in the tracing header.
Signed-off-by: David Howells <dhowells@redhat.com>
parent f792e3ac
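
Before the diff, a quick illustration of problem (2). The following standalone userspace C program (illustrative only, not part of the patch) shows the bit budget: a dirty bound can take any value in 0..PAGE_SIZE inclusive, so it needs PAGE_SHIFT + 1 bits, and two bounds have to share one unsigned long, which is only 32 bits wide on a 32-bit kernel.

/*
 * Illustration only (userspace, not kernel code): how many bits each
 * dirty bound needs for a few page sizes.  A bound spans 0..PAGE_SIZE
 * inclusive, i.e. PAGE_SIZE + 1 distinct values, and two bounds must
 * fit in one unsigned long (32 bits on a 32-bit kernel).
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_sizes[] = { 4096, 32768, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
		unsigned long sz = page_sizes[i];
		unsigned int bits = 0;

		/* Smallest width able to represent 0..sz inclusive. */
		while ((1UL << bits) < sz + 1)
			bits++;

		printf("PAGE_SIZE %6lu: %2u bits per bound, %2u bits for both%s\n",
		       sz, bits, 2 * bits,
		       2 * bits > 32 ? "  <- does not fit in 32 bits" : "");
	}
	return 0;
}

For PAGE_SIZE 65536 this prints 17 bits per bound and 34 bits for both, which is why the #else branch in the hunk below caps the split at 16 bits each and why 64K pages cannot be supported on a 32-bit system with this scheme.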
fs/afs/internal.h
@@ -858,6 +858,34 @@ struct afs_vnode_cache_aux {
 	u64			data_version;
 } __packed;
 
+/*
+ * We use page->private to hold the amount of the page that we've written to,
+ * splitting the field into two parts.  However, we need to represent a range
+ * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+ */
+#if PAGE_SIZE > 32768
+#define __AFS_PAGE_PRIV_MASK	0xffffffffUL
+#define __AFS_PAGE_PRIV_SHIFT	32
+#else
+#define __AFS_PAGE_PRIV_MASK	0xffffUL
+#define __AFS_PAGE_PRIV_SHIFT	16
+#endif
+
+static inline size_t afs_page_dirty_from(unsigned long priv)
+{
+	return priv & __AFS_PAGE_PRIV_MASK;
+}
+
+static inline size_t afs_page_dirty_to(unsigned long priv)
+{
+	return (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
+}
+
+static inline unsigned long afs_page_dirty(size_t from, size_t to)
+{
+	return ((unsigned long)to << __AFS_PAGE_PRIV_SHIFT) | from;
+}
+
 #include <trace/events/afs.h>
 
 /*****************************************************************************/
......
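
As a quick sanity check of the packing scheme, the helpers added above can be exercised in a minimal userspace sketch. The helper bodies are copied verbatim from the hunk above; the 16-bit split is assumed (PAGE_SIZE <= 32768), and the byte range 0x100..0xe00 in main() is an arbitrary example, not taken from the patch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define __AFS_PAGE_PRIV_MASK	0xffffUL
#define __AFS_PAGE_PRIV_SHIFT	16

static inline size_t afs_page_dirty_from(unsigned long priv)
{
	return priv & __AFS_PAGE_PRIV_MASK;
}

static inline size_t afs_page_dirty_to(unsigned long priv)
{
	return (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
}

static inline unsigned long afs_page_dirty(size_t from, size_t to)
{
	return ((unsigned long)to << __AFS_PAGE_PRIV_SHIFT) | from;
}

int main(void)
{
	/* Record that bytes 0x100..0xe00 of a page are dirty, much as
	 * the write_end path below would after a partial write. */
	unsigned long priv = afs_page_dirty(0x100, 0xe00);

	/* The round trip recovers both bounds unchanged. */
	assert(afs_page_dirty_from(priv) == 0x100);
	assert(afs_page_dirty_to(priv) == 0xe00);
	printf("priv=%#lx from=%#zx to=%#zx\n", priv,
	       afs_page_dirty_from(priv), afs_page_dirty_to(priv));
	return 0;
}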
fs/afs/write.c
@@ -117,8 +117,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 	t = f = 0;
 	if (PagePrivate(page)) {
 		priv = page_private(page);
-		f = priv & AFS_PRIV_MAX;
-		t = priv >> AFS_PRIV_SHIFT;
+		f = afs_page_dirty_from(priv);
+		t = afs_page_dirty_to(priv);
 		ASSERTCMP(f, <=, t);
 	}
@@ -206,22 +206,18 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	if (PagePrivate(page)) {
 		priv = page_private(page);
-		f = priv & AFS_PRIV_MAX;
-		t = priv >> AFS_PRIV_SHIFT;
+		f = afs_page_dirty_from(priv);
+		t = afs_page_dirty_to(priv);
 		if (from < f)
 			f = from;
 		if (to > t)
 			t = to;
-		priv = (unsigned long)t << AFS_PRIV_SHIFT;
-		priv |= f;
+		priv = afs_page_dirty(f, t);
 		set_page_private(page, priv);
 		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
 				     page->index, priv);
 	} else {
-		f = from;
-		t = to;
-		priv = (unsigned long)t << AFS_PRIV_SHIFT;
-		priv |= f;
+		priv = afs_page_dirty(from, to);
 		attach_page_private(page, (void *)priv);
 		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
 				     page->index, priv);
@@ -522,8 +518,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 	 */
 	start = primary_page->index;
 	priv = page_private(primary_page);
-	offset = priv & AFS_PRIV_MAX;
-	to = priv >> AFS_PRIV_SHIFT;
+	offset = afs_page_dirty_from(priv);
+	to = afs_page_dirty_to(priv);
 	trace_afs_page_dirty(vnode, tracepoint_string("store"),
 			     primary_page->index, priv);
@@ -568,8 +564,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			}
 
 			priv = page_private(page);
-			f = priv & AFS_PRIV_MAX;
-			t = priv >> AFS_PRIV_SHIFT;
+			f = afs_page_dirty_from(priv);
+			t = afs_page_dirty_to(priv);
 			if (f != 0 &&
 			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
 				unlock_page(page);
@@ -870,8 +866,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 	 */
 	wait_on_page_writeback(vmf->page);
 
-	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
-	priv |= 0; /* From */
+	priv = afs_page_dirty(0, PAGE_SIZE);
 	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
 			     vmf->page->index, priv);
 	if (PagePrivate(vmf->page))
@@ -930,8 +925,8 @@ int afs_launder_page(struct page *page)
 		f = 0;
 		t = PAGE_SIZE;
 		if (PagePrivate(page)) {
-			f = priv & AFS_PRIV_MAX;
-			t = priv >> AFS_PRIV_SHIFT;
+			f = afs_page_dirty_from(priv);
+			t = afs_page_dirty_to(priv);
 		}
 
 		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
......
include/trace/events/afs.h
@@ -966,19 +966,6 @@ TRACE_EVENT(afs_dir_check_failed,
 		      __entry->vnode, __entry->off, __entry->i_size)
 	    );
 
-/*
- * We use page->private to hold the amount of the page that we've written to,
- * splitting the field into two parts.  However, we need to represent a range
- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
- */
-#if PAGE_SIZE > 32768
-#define AFS_PRIV_MAX	0xffffffff
-#define AFS_PRIV_SHIFT	32
-#else
-#define AFS_PRIV_MAX	0xffff
-#define AFS_PRIV_SHIFT	16
-#endif
-
 TRACE_EVENT(afs_page_dirty,
 	    TP_PROTO(struct afs_vnode *vnode, const char *where,
 		     pgoff_t page, unsigned long priv),
@@ -999,10 +986,10 @@ TRACE_EVENT(afs_page_dirty,
 		    __entry->priv = priv;
 			   ),
 
-	    TP_printk("vn=%p %lx %s %lu-%lu",
+	    TP_printk("vn=%p %lx %s %zx-%zx",
 		      __entry->vnode, __entry->page, __entry->where,
-		      __entry->priv & AFS_PRIV_MAX,
-		      __entry->priv >> AFS_PRIV_SHIFT)
+		      afs_page_dirty_from(__entry->priv),
+		      afs_page_dirty_to(__entry->priv))
 	    );
 
 TRACE_EVENT(afs_call_state,
......