Commit a34847d4 authored by David Howells

afs: Don't use folio->private to record partial modification

AFS currently uses folio->private to store the range of bytes within a
folio that have been modified - the idea being that if we have, say, a 2MiB
folio and someone writes a single byte, we only have to write back that
single page and not the whole 2MiB folio - thereby saving on network
bandwidth.
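
As a rough sketch (illustrative only, not part of this patch), the mechanism
being removed was used along these lines, with the afs_folio_dirty*() helpers
that are deleted from fs/afs/internal.h below:

	/* Record that bytes [from, to) of the folio have been modified. */
	unsigned long priv = afs_folio_dirty(folio, from, to);
	folio_attach_private(folio, (void *)priv);

	/* At writeback time, recover the subrange so that only those bytes
	 * need be sent to the server rather than the whole folio.
	 */
	priv = (unsigned long)folio_get_private(folio);
	from = afs_folio_dirty_from(folio, priv);
	to   = afs_folio_dirty_to(folio, priv);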

Remove this, at least for now, and accept the extra network load (which
doesn't matter in the common case of writing a whole file at a time from
beginning to end).

This makes folio->private available for netfslib to use.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
parent 5f5ce7ba
@@ -386,63 +386,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read		= afs_issue_read,
 };
 
-/*
- * Adjust the dirty region of the page on truncation or full invalidation,
- * getting rid of the markers altogether if the region is entirely invalidated.
- */
-static void afs_invalidate_dirty(struct folio *folio, size_t offset,
-				 size_t length)
-{
-	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-	unsigned long priv;
-	unsigned int f, t, end = offset + length;
-
-	priv = (unsigned long)folio_get_private(folio);
-
-	/* we clean up only if the entire page is being invalidated */
-	if (offset == 0 && length == folio_size(folio))
-		goto full_invalidate;
-
-	/* If the page was dirtied by page_mkwrite(), the PTE stays writable
-	 * and we don't get another notification to tell us to expand it
-	 * again.
-	 */
-	if (afs_is_folio_dirty_mmapped(priv))
-		return;
-
-	/* We may need to shorten the dirty region */
-	f = afs_folio_dirty_from(folio, priv);
-	t = afs_folio_dirty_to(folio, priv);
-
-	if (t <= offset || f >= end)
-		return; /* Doesn't overlap */
-
-	if (f < offset && t > end)
-		return; /* Splits the dirty region - just absorb it */
-
-	if (f >= offset && t <= end)
-		goto undirty;
-
-	if (f < offset)
-		t = offset;
-	else
-		f = end;
-	if (f == t)
-		goto undirty;
-
-	priv = afs_folio_dirty(folio, f, t);
-	folio_change_private(folio, (void *)priv);
-	trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
-	return;
-
-undirty:
-	trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
-	folio_clear_dirty_for_io(folio);
-full_invalidate:
-	trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
-	folio_detach_private(folio);
-}
 /*
  * invalidate part or all of a page
  * - release a page and clean up its private data if offset is 0 (indicating
@@ -453,11 +396,6 @@ static void afs_invalidate_folio(struct folio *folio, size_t offset,
 {
 	_enter("{%lu},%zu,%zu", folio->index, offset, length);
 
-	BUG_ON(!folio_test_locked(folio));
-
-	if (folio_get_private(folio))
-		afs_invalidate_dirty(folio, offset, length);
-
 	folio_wait_fscache(folio);
 	_leave("");
 }
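
(Aside, not part of the patch: with made-up offsets, the overlap handling in
the removed afs_invalidate_dirty() above plays out as follows for a 4KiB folio
whose dirty region is [512, 1024).)

	invalidate offset=0,    length=4096 -> full_invalidate: private data dropped
	invalidate offset=2048, length=2048 -> t <= offset: no overlap, nothing to do
	invalidate offset=512,  length=512  -> f >= offset && t <= end: undirty
	invalidate offset=600,  length=100  -> splits the dirty region: absorbed as is
	invalidate offset=768,  length=3328 -> tail clipped: region becomes [512, 768)
	invalidate offset=256,  length=512  -> head clipped: region becomes [768, 1024)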
@@ -485,11 +423,6 @@ static bool afs_release_folio(struct folio *folio, gfp_t gfp)
 	fscache_note_page_release(afs_vnode_cache(vnode));
 #endif
 
-	if (folio_test_private(folio)) {
-		trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
-		folio_detach_private(folio);
-	}
-
 	/* Indicate that the folio can be released */
 	_leave(" = T");
 	return true;
...
@@ -894,62 +894,6 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
 			   i_size_read(&vnode->netfs.inode), flags);
 }
 
-/*
- * We use folio->private to hold the amount of the folio that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
- * exceeds what we can encode.
- */
-#ifdef CONFIG_64BIT
-#define __AFS_FOLIO_PRIV_MASK		0x7fffffffUL
-#define __AFS_FOLIO_PRIV_SHIFT		32
-#define __AFS_FOLIO_PRIV_MMAPPED	0x80000000UL
-#else
-#define __AFS_FOLIO_PRIV_MASK		0x7fffUL
-#define __AFS_FOLIO_PRIV_SHIFT		16
-#define __AFS_FOLIO_PRIV_MMAPPED	0x8000UL
-#endif
-
-static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
-{
-	int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
-	return (shift > 0) ? shift : 0;
-}
-
-static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
-{
-	unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
-
-	/* The lower bound is inclusive */
-	return x << afs_folio_dirty_resolution(folio);
-}
-
-static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
-{
-	unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
-
-	/* The upper bound is immediately beyond the region */
-	return (x + 1) << afs_folio_dirty_resolution(folio);
-}
-
-static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
-{
-	unsigned int res = afs_folio_dirty_resolution(folio);
-	from >>= res;
-	to = (to - 1) >> res;
-	return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
-}
-
-static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
-{
-	return priv | __AFS_FOLIO_PRIV_MMAPPED;
-}
-
-static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
-{
-	return priv & __AFS_FOLIO_PRIV_MMAPPED;
-}
 #include <trace/events/afs.h>
 
 /*****************************************************************************/
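
(Aside, not part of the patch: a worked example of the resolution reduction
performed by the helpers removed above, assuming a 32-bit kernel, i.e.
__AFS_FOLIO_PRIV_SHIFT == 16, and a 64KiB folio, so folio_shift() == 16.)

	afs_folio_dirty_resolution(folio)  == 16 - (16 - 1) == 1   /* 2-byte units */

	priv = afs_folio_dirty(folio, 100, 300);
		/* from stored as 100 >> 1 == 50, to stored as (300 - 1) >> 1 == 149 */
	afs_folio_dirty_from(folio, priv)  == 50 << 1        == 100
	afs_folio_dirty_to(folio, priv)    == (149 + 1) << 1 == 300

The rounding can only widen the range ("from" rounds down, "to" rounds up), so
the recorded region always covers every byte actually modified.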
...
This diff is collapsed.
@@ -846,26 +846,18 @@ TRACE_EVENT(afs_folio_dirty,
 		    __field(struct afs_vnode *,		vnode)
 		    __field(const char *,		where)
 		    __field(pgoff_t,			index)
-		    __field(unsigned long,		from)
-		    __field(unsigned long,		to)
+		    __field(size_t,			size)
 			     ),
 
 	    TP_fast_assign(
-		    unsigned long priv = (unsigned long)folio_get_private(folio);
 		    __entry->vnode = vnode;
 		    __entry->where = where;
 		    __entry->index = folio_index(folio);
-		    __entry->from = afs_folio_dirty_from(folio, priv);
-		    __entry->to = afs_folio_dirty_to(folio, priv);
-		    __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
-				    (1UL << (BITS_PER_LONG - 1)) : 0);
+		    __entry->size = folio_size(folio);
 			   ),
 
-	    TP_printk("vn=%p %lx %s %lx-%lx%s",
-		      __entry->vnode, __entry->index, __entry->where,
-		      __entry->from,
-		      __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
-		      __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
+	    TP_printk("vn=%p ix=%05lx s=%05lx %s",
+		      __entry->vnode, __entry->index, __entry->size, __entry->where)
 	    );
 
 TRACE_EVENT(afs_call_state,
...