Commit a087e2b5 authored by Bijan Mottahedeh, committed by Jens Axboe

io_uring: add wrappers for memory accounting

Facilitate separation of locked memory usage reporting vs. limiting for
upcoming patches.  No functional changes.
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
[axboe: kill unnecessary () around return in io_account_mem()]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a31eb4a2
@@ -6968,12 +6968,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	return ret;
 }
 
-static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
+static inline void __io_unaccount_mem(struct user_struct *user,
+				      unsigned long nr_pages)
 {
 	atomic_long_sub(nr_pages, &user->locked_vm);
 }
 
-static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
+static inline int __io_account_mem(struct user_struct *user,
+				   unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
 
@@ -6991,6 +6993,20 @@ static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
 	return 0;
 }
 
+static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
+{
+	if (ctx->account_mem)
+		__io_unaccount_mem(ctx->user, nr_pages);
+}
+
+static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
+{
+	if (ctx->account_mem)
+		return __io_account_mem(ctx->user, nr_pages);
+
+	return 0;
+}
+
 static void io_mem_free(void *ptr)
 {
 	struct page *page;
@@ -7065,8 +7081,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
 		for (j = 0; j < imu->nr_bvecs; j++)
 			unpin_user_page(imu->bvec[j].bv_page);
 
-		if (ctx->account_mem)
-			io_unaccount_mem(ctx->user, imu->nr_bvecs);
+		io_unaccount_mem(ctx, imu->nr_bvecs);
 		kvfree(imu->bvec);
 		imu->nr_bvecs = 0;
 	}
@@ -7149,11 +7164,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 		start = ubuf >> PAGE_SHIFT;
 		nr_pages = end - start;
 
-		if (ctx->account_mem) {
-			ret = io_account_mem(ctx->user, nr_pages);
-			if (ret)
-				goto err;
-		}
+		ret = io_account_mem(ctx, nr_pages);
+		if (ret)
+			goto err;
 
 		ret = 0;
 		if (!pages || nr_pages > got_pages) {
@@ -7166,8 +7179,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 						GFP_KERNEL);
 			if (!pages || !vmas) {
 				ret = -ENOMEM;
-				if (ctx->account_mem)
-					io_unaccount_mem(ctx->user, nr_pages);
+				io_unaccount_mem(ctx, nr_pages);
 				goto err;
 			}
 			got_pages = nr_pages;
@@ -7177,8 +7189,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 				  GFP_KERNEL);
 		ret = -ENOMEM;
 		if (!imu->bvec) {
-			if (ctx->account_mem)
-				io_unaccount_mem(ctx->user, nr_pages);
+			io_unaccount_mem(ctx, nr_pages);
 			goto err;
 		}
@@ -7209,8 +7220,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 		 */
 		if (pret > 0)
 			unpin_user_pages(pages, pret);
-		if (ctx->account_mem)
-			io_unaccount_mem(ctx->user, nr_pages);
+		io_unaccount_mem(ctx, nr_pages);
 		kvfree(imu->bvec);
 		goto err;
 	}
@@ -7315,9 +7325,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_mem_free(ctx->sq_sqes);
 
 	percpu_ref_exit(&ctx->refs);
-	if (ctx->account_mem)
-		io_unaccount_mem(ctx->user,
-				ring_pages(ctx->sq_entries, ctx->cq_entries));
+	io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries));
 	free_uid(ctx->user);
 	put_cred(ctx->creds);
 	kfree(ctx->cancel_hash);
@@ -7887,7 +7895,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	account_mem = !capable(CAP_IPC_LOCK);
 
 	if (account_mem) {
-		ret = io_account_mem(user,
+		ret = __io_account_mem(user,
 				ring_pages(p->sq_entries, p->cq_entries));
 		if (ret) {
 			free_uid(user);
@@ -7898,7 +7906,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ctx = io_ring_ctx_alloc(p);
 	if (!ctx) {
 		if (account_mem)
-			io_unaccount_mem(user, ring_pages(p->sq_entries,
+			__io_unaccount_mem(user, ring_pages(p->sq_entries,
 						p->cq_entries));
 		free_uid(user);
 		return -ENOMEM;
...
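Taken together, the change is mechanical: the existing per-user helpers are renamed to __io_account_mem()/__io_unaccount_mem(), and new ctx-level wrappers fold in the ctx->account_mem check so callers no longer open-code it. The snippet below is a minimal, self-contained user-space model of that wrapper pattern, not kernel code; the struct names, the fixed 16-page limit, and the main() driver are invented for illustration.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-ins for the user_struct / io_ring_ctx fields involved. */
struct user_acct {
	unsigned long locked_vm;	/* pages currently accounted */
	unsigned long limit;		/* RLIMIT_MEMLOCK-style cap, in pages */
};

struct ring_ctx {
	struct user_acct *user;
	int account_mem;		/* 0 when a CAP_IPC_LOCK-style bypass applies */
};

/* Low-level helpers: always touch the counter. */
static int __account_mem(struct user_acct *user, unsigned long nr_pages)
{
	if (user->locked_vm + nr_pages > user->limit)
		return -ENOMEM;
	user->locked_vm += nr_pages;
	return 0;
}

static void __unaccount_mem(struct user_acct *user, unsigned long nr_pages)
{
	user->locked_vm -= nr_pages;
}

/* ctx-level wrappers: the account_mem check lives here, not in every caller. */
static int account_mem(struct ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->account_mem)
		return __account_mem(ctx->user, nr_pages);
	return 0;
}

static void unaccount_mem(struct ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->account_mem)
		__unaccount_mem(ctx->user, nr_pages);
}

int main(void)
{
	struct user_acct user = { .locked_vm = 0, .limit = 16 };
	struct ring_ctx ctx = { .user = &user, .account_mem = 1 };

	if (account_mem(&ctx, 8) == 0)
		printf("accounted 8 pages, locked_vm=%lu\n", user.locked_vm);
	unaccount_mem(&ctx, 8);
	printf("after unaccount, locked_vm=%lu\n", user.locked_vm);
	return 0;
}

Keeping the conditional in one place is what lets later patches change how accounting is reported versus limited without touching every call site.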