Commit cbefa53c authored by Trond Myklebust, committed by Anna Schumaker

NFS: Convert the remaining pagelist helper functions to support folios

Allow creation of subrequests from a request that is carrying a folio.
Add helpers to set up and tear down requests carrying folios.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 6dd85e83
...@@ -466,10 +466,9 @@ nfs_page_group_destroy(struct kref *kref) ...@@ -466,10 +466,9 @@ nfs_page_group_destroy(struct kref *kref)
nfs_release_request(head); nfs_release_request(head);
} }
static struct nfs_page * static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page, unsigned int pgbase, pgoff_t index,
unsigned int pgbase, unsigned int offset, unsigned int offset, unsigned int count)
unsigned int count)
{ {
struct nfs_page *req; struct nfs_page *req;
struct nfs_open_context *ctx = l_ctx->open_context; struct nfs_open_context *ctx = l_ctx->open_context;
...@@ -488,19 +487,32 @@ __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page, ...@@ -488,19 +487,32 @@ __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
/* Initialize the request struct. Initially, we assume a /* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in * long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */ * update_nfs_request below if the region is not locked. */
req->wb_page = page;
if (page) {
req->wb_index = page_index(page);
get_page(page);
}
req->wb_offset = offset;
req->wb_pgbase = pgbase; req->wb_pgbase = pgbase;
req->wb_index = index;
req->wb_offset = offset;
req->wb_bytes = count; req->wb_bytes = count;
kref_init(&req->wb_kref); kref_init(&req->wb_kref);
req->wb_nio = 0; req->wb_nio = 0;
return req; return req;
} }
/* Attach @folio to @req: take a reference on the folio, record it in
 * wb_folio, and mark the request as folio-backed via PG_FOLIO.
 * A NULL @folio is a no-op. */
static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
{
	if (folio == NULL)
		return;
	req->wb_folio = folio;
	folio_get(folio);
	set_bit(PG_FOLIO, &req->wb_flags);
}
/* Attach @page to @req: take a reference on the page and record it in
 * wb_page. A NULL @page is a no-op. */
static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
{
	if (page == NULL)
		return;
	req->wb_page = page;
	get_page(page);
}
/** /**
* nfs_create_request - Create an NFS read/write request. * nfs_create_request - Create an NFS read/write request.
* @ctx: open context to use * @ctx: open context to use
...@@ -521,9 +533,11 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page, ...@@ -521,9 +533,11 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
if (IS_ERR(l_ctx)) if (IS_ERR(l_ctx))
return ERR_CAST(l_ctx); return ERR_CAST(l_ctx);
ret = __nfs_create_request(l_ctx, page, offset, offset, count); ret = nfs_page_create(l_ctx, offset, page_index(page), offset, count);
if (!IS_ERR(ret)) if (!IS_ERR(ret)) {
nfs_page_assign_page(ret, page);
nfs_page_group_init(ret, NULL); nfs_page_group_init(ret, NULL);
}
nfs_put_lock_context(l_ctx); nfs_put_lock_context(l_ctx);
return ret; return ret;
} }
...@@ -536,11 +550,16 @@ nfs_create_subreq(struct nfs_page *req, ...@@ -536,11 +550,16 @@ nfs_create_subreq(struct nfs_page *req,
{ {
struct nfs_page *last; struct nfs_page *last;
struct nfs_page *ret; struct nfs_page *ret;
struct folio *folio = nfs_page_to_folio(req);
struct page *page = nfs_page_to_page(req, pgbase); struct page *page = nfs_page_to_page(req, pgbase);
ret = __nfs_create_request(req->wb_lock_context, page, pgbase, offset, ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
count); offset, count);
if (!IS_ERR(ret)) { if (!IS_ERR(ret)) {
if (folio)
nfs_page_assign_folio(ret, folio);
else
nfs_page_assign_page(ret, page);
/* find the last request */ /* find the last request */
for (last = req->wb_head; for (last = req->wb_head;
last->wb_this_page != req->wb_head; last->wb_this_page != req->wb_head;
...@@ -548,7 +567,6 @@ nfs_create_subreq(struct nfs_page *req, ...@@ -548,7 +567,6 @@ nfs_create_subreq(struct nfs_page *req,
; ;
nfs_lock_request(ret); nfs_lock_request(ret);
ret->wb_index = req->wb_index;
nfs_page_group_init(ret, last); nfs_page_group_init(ret, last);
ret->wb_nio = req->wb_nio; ret->wb_nio = req->wb_nio;
} }
...@@ -587,11 +605,16 @@ void nfs_unlock_and_release_request(struct nfs_page *req) ...@@ -587,11 +605,16 @@ void nfs_unlock_and_release_request(struct nfs_page *req)
*/ */
static void nfs_clear_request(struct nfs_page *req) static void nfs_clear_request(struct nfs_page *req)
{ {
struct folio *folio = nfs_page_to_folio(req);
struct page *page = req->wb_page; struct page *page = req->wb_page;
struct nfs_lock_context *l_ctx = req->wb_lock_context; struct nfs_lock_context *l_ctx = req->wb_lock_context;
struct nfs_open_context *ctx; struct nfs_open_context *ctx;
if (page != NULL) { if (folio != NULL) {
folio_put(folio);
req->wb_folio = NULL;
clear_bit(PG_FOLIO, &req->wb_flags);
} else if (page != NULL) {
put_page(page); put_page(page);
req->wb_page = NULL; req->wb_page = NULL;
} }
...@@ -1471,18 +1494,23 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) ...@@ -1471,18 +1494,23 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{ {
struct nfs_pgio_mirror *mirror; struct nfs_pgio_mirror *mirror;
struct nfs_page *prev; struct nfs_page *prev;
struct folio *folio;
u32 midx; u32 midx;
for (midx = 0; midx < desc->pg_mirror_count; midx++) { for (midx = 0; midx < desc->pg_mirror_count; midx++) {
mirror = nfs_pgio_get_mirror(desc, midx); mirror = nfs_pgio_get_mirror(desc, midx);
if (!list_empty(&mirror->pg_list)) { if (!list_empty(&mirror->pg_list)) {
prev = nfs_list_entry(mirror->pg_list.prev); prev = nfs_list_entry(mirror->pg_list.prev);
if (index != prev->wb_index + 1) { folio = nfs_page_to_folio(prev);
if (folio) {
if (index == folio_next_index(folio))
continue;
} else if (index == prev->wb_index + 1)
continue;
nfs_pageio_complete(desc); nfs_pageio_complete(desc);
break; break;
} }
} }
}
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment