Commit c917cfaf authored by Trond Myklebust, committed by Anna Schumaker

NFS: Fix up NFS I/O subrequest creation

We require all NFS I/O subrequests to duplicate the lock context as well
as the open context.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 6fbda89b
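
In short, the patch splits request creation so that a subrequest reuses the parent request's lock context rather than looking one up from the open context, and takes its own reference on it. Below is a minimal stand-alone sketch (plain userspace C with hypothetical struct and function names, not the kernel API) of the reference-counting rule the change enforces: a subrequest duplicates the parent's lock context reference as well as its open context reference.

/*
 * Stand-alone model of the rule enforced by this patch (hypothetical
 * types, not kernel code): a subrequest holds its own reference on
 * both the open context and the lock context it inherits.
 */
#include <stdio.h>
#include <stdlib.h>

struct open_ctx { int refs; };
struct lock_ctx { int refs; struct open_ctx *open; };
struct request  { struct open_ctx *open; struct lock_ctx *lock; };

/* Analogue of what __nfs_create_request() now does for a subrequest:
 * bump the refcount on BOTH contexts inherited from the parent. */
static struct request *create_subreq(const struct request *parent)
{
	struct request *sub = malloc(sizeof(*sub));

	if (!sub)
		return NULL;
	sub->open = parent->open;
	sub->lock = parent->lock;
	sub->open->refs++;	/* get_nfs_open_context(ctx) */
	sub->lock->refs++;	/* refcount_inc(&l_ctx->count) */
	return sub;
}

int main(void)
{
	struct open_ctx o = { .refs = 1 };
	struct lock_ctx l = { .refs = 1, .open = &o };
	struct request head = { .open = &o, .lock = &l };
	struct request *sub = create_subreq(&head);

	/* Both refcounts are now 2, so the subrequest can safely
	 * outlive the head request. */
	printf("open refs=%d, lock refs=%d\n", o.refs, l.refs);
	free(sub);
	return 0;
}

In the actual patch below, the extra references are taken inside __nfs_create_request() (via get_nfs_open_context() and refcount_inc(&l_ctx->count)), and the new nfs_create_subreq() helper passes in the parent's req->wb_lock_context.
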
@@ -295,25 +295,13 @@ nfs_page_group_destroy(struct kref *kref)
 	nfs_release_request(head);
 }
 
-/**
- * nfs_create_request - Create an NFS read/write request.
- * @ctx: open context to use
- * @page: page to write
- * @last: last nfs request created for this page group or NULL if head
- * @offset: starting offset within the page for the write
- * @count: number of bytes to read/write
- *
- * The page must be locked by the caller. This makes sure we never
- * create two different requests for the same page.
- * User should ensure it is safe to sleep in this function.
- */
-struct nfs_page *
-nfs_create_request(struct nfs_open_context *ctx, struct page *page,
-		   struct nfs_page *last, unsigned int offset,
-		   unsigned int count)
+static struct nfs_page *
+__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
+		   struct nfs_page *last, unsigned int pgbase,
+		   unsigned int offset, unsigned int count)
 {
 	struct nfs_page *req;
-	struct nfs_lock_context *l_ctx;
+	struct nfs_open_context *ctx = l_ctx->open_context;
 
 	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
 		return ERR_PTR(-EBADF);
@@ -322,13 +310,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 	if (req == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	/* get lock context early so we can deal with alloc failures */
-	l_ctx = nfs_get_lock_context(ctx);
-	if (IS_ERR(l_ctx)) {
-		nfs_page_free(req);
-		return ERR_CAST(l_ctx);
-	}
 	req->wb_lock_context = l_ctx;
+	refcount_inc(&l_ctx->count);
 	atomic_inc(&l_ctx->io_count);
 
 	/* Initialize the request struct. Initially, we assume a
@@ -340,7 +323,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 		get_page(page);
 	}
 	req->wb_offset = offset;
-	req->wb_pgbase = offset;
+	req->wb_pgbase = pgbase;
 	req->wb_bytes = count;
 	req->wb_context = get_nfs_open_context(ctx);
 	kref_init(&req->wb_kref);
@@ -348,6 +331,49 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 	return req;
 }
 
+/**
+ * nfs_create_request - Create an NFS read/write request.
+ * @ctx: open context to use
+ * @page: page to write
+ * @last: last nfs request created for this page group or NULL if head
+ * @offset: starting offset within the page for the write
+ * @count: number of bytes to read/write
+ *
+ * The page must be locked by the caller. This makes sure we never
+ * create two different requests for the same page.
+ * User should ensure it is safe to sleep in this function.
+ */
+struct nfs_page *
+nfs_create_request(struct nfs_open_context *ctx, struct page *page,
+		   struct nfs_page *last, unsigned int offset,
+		   unsigned int count)
+{
+	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
+	struct nfs_page *ret;
+
+	if (IS_ERR(l_ctx))
+		return ERR_CAST(l_ctx);
+	ret = __nfs_create_request(l_ctx, page, last, offset, offset, count);
+	nfs_put_lock_context(l_ctx);
+	return ret;
+}
+
+static struct nfs_page *
+nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
+		  unsigned int pgbase, unsigned int offset,
+		  unsigned int count)
+{
+	struct nfs_page *ret;
+
+	ret = __nfs_create_request(req->wb_lock_context, req->wb_page, last,
+			pgbase, offset, count);
+	if (!IS_ERR(ret)) {
+		nfs_lock_request(ret);
+		ret->wb_index = req->wb_index;
+	}
+	return ret;
+}
+
 /**
  * nfs_unlock_request - Unlock request and wake up sleepers.
  * @req: pointer to request
@@ -1049,14 +1075,10 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 		pgbase += subreq->wb_bytes;
 
 		if (bytes_left) {
-			subreq = nfs_create_request(req->wb_context,
-					req->wb_page,
-					subreq, pgbase, bytes_left);
+			subreq = nfs_create_subreq(req, subreq, pgbase,
+					offset, bytes_left);
 			if (IS_ERR(subreq))
 				goto err_ptr;
-			nfs_lock_request(subreq);
-			subreq->wb_offset = offset;
-			subreq->wb_index = req->wb_index;
 		}
 	} while (bytes_left > 0);
 
@@ -1158,19 +1180,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 			     lastreq = lastreq->wb_this_page)
 				;
 
-			dupreq = nfs_create_request(req->wb_context,
-					req->wb_page, lastreq, pgbase, bytes);
+			dupreq = nfs_create_subreq(req, lastreq,
					pgbase, offset, bytes);
 
+			nfs_page_group_unlock(req);
 			if (IS_ERR(dupreq)) {
-				nfs_page_group_unlock(req);
 				desc->pg_error = PTR_ERR(dupreq);
 				goto out_failed;
 			}
 
-			nfs_lock_request(dupreq);
-			nfs_page_group_unlock(req);
-			dupreq->wb_offset = offset;
-			dupreq->wb_index = req->wb_index;
 		} else
 			dupreq = req;
...