Commit b66aaa8d authored by Trond Myklebust

NFS: Fix the inode request accounting when pages have subrequests

Both nfs_destroy_unlinked_subrequests() and nfs_lock_and_join_requests()
manipulate the inode reference flag on a request (PG_INODE_REF), so they
also need to adjust NFS_I(inode)->nrequests accordingly.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 31a01f09
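
The accounting rule this change restores is that NFS_I(inode)->nrequests moves
in step with the PG_INODE_REF reference on a request, with the counter updated
under inode->i_lock. The sketch below is illustrative only; the helpers
nfs_page_take_inode_ref() and nfs_page_drop_inode_ref() are hypothetical names,
not functions in the kernel tree.

/* Illustrative sketch only -- these helpers do not exist in fs/nfs/write.c.
 * They show the invariant the patch enforces: PG_INODE_REF and
 * NFS_I(inode)->nrequests are updated together, with the counter protected
 * by inode->i_lock.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>

static void nfs_page_take_inode_ref(struct inode *inode, struct nfs_page *req)
{
        /* take the inode reference at most once per request */
        if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
                kref_get(&req->wb_kref);
                spin_lock(&inode->i_lock);
                NFS_I(inode)->nrequests++;
                spin_unlock(&inode->i_lock);
        }
}

static void nfs_page_drop_inode_ref(struct inode *inode, struct nfs_page *req)
{
        /* drop the inode reference and the matching nrequests count */
        if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
                nfs_release_request(req);
                spin_lock(&inode->i_lock);
                NFS_I(inode)->nrequests--;
                spin_unlock(&inode->i_lock);
        }
}

In the patch itself, nfs_lock_and_join_requests() already holds inode->i_lock
at the point where it re-takes PG_INODE_REF on the head request, so it bumps
nrequests directly, whereas nfs_destroy_unlinked_subrequests() takes the lock
around its decrement.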
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -418,7 +418,8 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
  */
 static void
 nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
-                                 struct nfs_page *old_head)
+                                 struct nfs_page *old_head,
+                                 struct inode *inode)
 {
         while (destroy_list) {
                 struct nfs_page *subreq = destroy_list;
@@ -443,9 +444,12 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
                         nfs_page_group_clear_bits(subreq);
 
                         /* release the PG_INODE_REF reference */
-                        if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+                        if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
                                 nfs_release_request(subreq);
-                        else
+                                spin_lock(&inode->i_lock);
+                                NFS_I(inode)->nrequests--;
+                                spin_unlock(&inode->i_lock);
+                        } else
                                 WARN_ON_ONCE(1);
                 } else {
                         WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
@@ -572,25 +576,24 @@ nfs_lock_and_join_requests(struct page *page)
                 head->wb_bytes = total_bytes;
         }
 
+        /* Postpone destruction of this request */
+        if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
+                set_bit(PG_INODE_REF, &head->wb_flags);
+                kref_get(&head->wb_kref);
+                NFS_I(inode)->nrequests++;
+        }
+
         /*
          * prepare head request to be added to new pgio descriptor
          */
         nfs_page_group_clear_bits(head);
 
-        /*
-         * some part of the group was still on the inode list - otherwise
-         * the group wouldn't be involved in async write.
-         * grab a reference for the head request, iff it needs one.
-         */
-        if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
-                kref_get(&head->wb_kref);
-
         nfs_page_group_unlock(head);
 
         /* drop lock to clean uprequests on destroy list */
         spin_unlock(&inode->i_lock);
 
-        nfs_destroy_unlinked_subrequests(destroy_list, head);
+        nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
 
         /* still holds ref on head from nfs_page_find_head_request_locked
          * and still has lock on head from lock loop */