Commit 0d4597c8 authored by Leon Romanovsky's avatar Leon Romanovsky Committed by David S. Miller

net/rds: Track user mapped pages through special API

Convert net/rds to use the newly introduced pin_user_pages() API,
which properly sets FOLL_PIN. Setting FOLL_PIN is now required for
code that requires tracking of pinned pages.

Note that this effectively changes the code's behavior: it now
ultimately calls set_page_dirty_lock(), instead of set_page_dirty().
This is probably more accurate.

As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]

[1] https://lore.kernel.org/r/20190723153640.GB720@lst.de

Cc: Hans Westgaard Ry <hans.westgaard.ry@oracle.com>
Cc: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent afecdb37
@@ -162,10 +162,9 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
if (write) if (write)
gup_flags |= FOLL_WRITE; gup_flags |= FOLL_WRITE;
ret = get_user_pages_fast(user_addr, nr_pages, gup_flags, pages); ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
if (ret >= 0 && ret < nr_pages) { if (ret >= 0 && ret < nr_pages) {
while (ret--) unpin_user_pages(pages, ret);
put_page(pages[ret]);
ret = -EFAULT; ret = -EFAULT;
} }
@@ -300,8 +299,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* to release anything. * to release anything.
*/ */
if (!need_odp) { if (!need_odp) {
for (i = 0 ; i < nents; i++) unpin_user_pages(pages, nr_pages);
put_page(sg_page(&sg[i]));
kfree(sg); kfree(sg);
} }
ret = PTR_ERR(trans_private); ret = PTR_ERR(trans_private);
@@ -325,7 +323,12 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
if (cookie_ret) if (cookie_ret)
*cookie_ret = cookie; *cookie_ret = cookie;
if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { if (args->cookie_addr &&
put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
if (!need_odp) {
unpin_user_pages(pages, nr_pages);
kfree(sg);
}
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
@@ -496,9 +499,7 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
* is the case for a RDMA_READ which copies from remote * is the case for a RDMA_READ which copies from remote
* to local memory * to local memory
*/ */
if (!ro->op_write) unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
set_page_dirty(page);
put_page(page);
} }
} }
...@@ -515,8 +516,7 @@ void rds_atomic_free_op(struct rm_atomic_op *ao) ...@@ -515,8 +516,7 @@ void rds_atomic_free_op(struct rm_atomic_op *ao)
/* Mark page dirty if it was possibly modified, which /* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote * is the case for a RDMA_READ which copies from remote
* to local memory */ * to local memory */
set_page_dirty(page); unpin_user_pages_dirty_lock(&page, 1, true);
put_page(page);
kfree(ao->op_notifier); kfree(ao->op_notifier);
ao->op_notifier = NULL; ao->op_notifier = NULL;
@@ -944,7 +944,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
return ret; return ret;
err: err:
if (page) if (page)
put_page(page); unpin_user_page(page);
rm->atomic.op_active = 0; rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier); kfree(rm->atomic.op_notifier);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment