Commit 91a76be3 authored by Dominique Martinet

9p: add a per-client fcall kmem_cache

Having a dedicated cache for the fcall allocations helps reduce
end-to-end latency.

The caches will automatically be merged if there are multiple caches
of items with the same size, so we do not need to try to share a
cache between different clients that use the same msize.
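
A minimal sketch of that merging behaviour (an illustration, not part
of this commit), assuming a module context, the default slab
configuration (booted without slab_nomerge), and made-up cache names
and sizes:

#include <linux/module.h>
#include <linux/slab.h>

/* Two caches with the same object size, no constructor and identical
 * flags: the slab allocator may back both with the same slabs, with
 * the second name becoming an alias (e.g. visible as a symlink under
 * /sys/kernel/slab/ with SLUB).
 */
static struct kmem_cache *cache_a, *cache_b;

static int __init merge_demo_init(void)
{
	cache_a = kmem_cache_create("merge-demo-a", 8192, 0, 0, NULL);
	cache_b = kmem_cache_create("merge-demo-b", 8192, 0, 0, NULL);
	if (!cache_a || !cache_b) {
		kmem_cache_destroy(cache_a);	/* NULL-safe */
		kmem_cache_destroy(cache_b);
		return -ENOMEM;
	}
	return 0;
}

static void __exit merge_demo_exit(void)
{
	kmem_cache_destroy(cache_a);
	kmem_cache_destroy(cache_b);
}

module_init(merge_demo_init);
module_exit(merge_demo_exit);
MODULE_LICENSE("GPL");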

Since the msize is negotiated with the server, only allocate the cache
after that negotiation has happened - previous allocations or
allocations of different sizes (e.g. zero-copy fcall) are made with
kmalloc directly.
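
For reference when reading the net/9p/client.c hunk below, here is the
same kmem_cache_create_usercopy() call as in the diff, with its
parameters annotated (the comments are added here; the call itself is
unchanged):

clnt->fcall_cache =
	kmem_cache_create_usercopy("9p-fcall-cache",
				   clnt->msize,	/* object size: negotiated msize */
				   0, 0,	/* default align, no slab flags */
				   P9_HDRSZ + 4,	/* useroffset: size[4] type[1] tag[2] count[4] */
				   clnt->msize - (P9_HDRSZ + 4),	/* usersize */
				   NULL);	/* no constructor */

With CONFIG_HARDENED_USERCOPY, copies to and from userspace are only
permitted within the [useroffset, useroffset + usersize) window of
each object, i.e. the read payload past the smallest reply header.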

Some figures from two beefy VMs with Connect-IB (SR-IOV) / trans=rdma,
with ior running 32 processes in parallel doing small 32-byte IOs:
 - no alloc (4.18-rc7 request cache): 65.4k req/s
 - non-power of two alloc, no patch: 61.6k req/s
 - power of two alloc, no patch: 62.2k req/s
 - non-power of two alloc, with patch: 64.7k req/s
 - power of two alloc, with patch: 65.1k req/s

Link: http://lkml.kernel.org/r/1532943263-24378-2-git-send-email-asmadeus@codewreck.org
Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
Acked-by: Jun Piao <piaojun@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Greg Kurz <groug@kaod.org>
parent 523adb6c
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -336,6 +336,9 @@ enum p9_qid_t {
 #define P9_NOFID (u32)(~0)
 #define P9_MAXWELEM 16
 
+/* Minimal header size: size[4] type[1] tag[2] */
+#define P9_HDRSZ 7
+
 /* ample room for Twrite/Rread header */
 #define P9_IOHDRSZ 24
 
@@ -558,6 +561,7 @@ struct p9_fcall {
 	size_t offset;
 	size_t capacity;
 
+	struct kmem_cache *cache;
 	u8 *sdata;
 };
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -123,6 +123,7 @@ struct p9_client {
 	struct p9_trans_module *trans_mod;
 	enum p9_trans_status status;
 	void *trans;
+	struct kmem_cache *fcall_cache;
 
 	union {
 		struct {
diff --git a/net/9p/client.c b/net/9p/client.c
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -231,9 +231,16 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	return ret;
 }
 
-static int p9_fcall_init(struct p9_fcall *fc, int alloc_msize)
+static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
+			 int alloc_msize)
 {
-	fc->sdata = kmalloc(alloc_msize, GFP_NOFS);
+	if (likely(c->fcall_cache) && alloc_msize == c->msize) {
+		fc->sdata = kmem_cache_alloc(c->fcall_cache, GFP_NOFS);
+		fc->cache = c->fcall_cache;
+	} else {
+		fc->sdata = kmalloc(alloc_msize, GFP_NOFS);
+		fc->cache = NULL;
+	}
 	if (!fc->sdata)
 		return -ENOMEM;
 	fc->capacity = alloc_msize;
@@ -242,7 +249,16 @@ static int p9_fcall_init(struct p9_fcall *fc, int alloc_msize)
 
 void p9_fcall_fini(struct p9_fcall *fc)
 {
-	kfree(fc->sdata);
+	/* sdata can be NULL for interrupted requests in trans_rdma,
+	 * and kmem_cache_free does not do NULL-check for us
+	 */
+	if (unlikely(!fc->sdata))
+		return;
+
+	if (fc->cache)
+		kmem_cache_free(fc->cache, fc->sdata);
+	else
+		kfree(fc->sdata);
 }
 EXPORT_SYMBOL(p9_fcall_fini);
 
@@ -267,9 +283,9 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 	if (!req)
 		return NULL;
 
-	if (p9_fcall_init(&req->tc, alloc_msize))
+	if (p9_fcall_init(c, &req->tc, alloc_msize))
 		goto free_req;
-	if (p9_fcall_init(&req->rc, alloc_msize))
+	if (p9_fcall_init(c, &req->rc, alloc_msize))
 		goto free;
 
 	p9pdu_reset(&req->tc);
@@ -951,6 +967,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
 	clnt->trans_mod = NULL;
 	clnt->trans = NULL;
+	clnt->fcall_cache = NULL;
 
 	client_id = utsname()->nodename;
 	memcpy(clnt->name, client_id, strlen(client_id) + 1);
@@ -987,6 +1004,15 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	if (err)
 		goto close_trans;
 
+	/* P9_HDRSZ + 4 is the smallest packet header we can have that is
+	 * followed by data accessed from userspace by read
+	 */
+	clnt->fcall_cache =
+		kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize,
+					   0, 0, P9_HDRSZ + 4,
+					   clnt->msize - (P9_HDRSZ + 4),
+					   NULL);
+
 	return clnt;
 
 close_trans:
@@ -1018,6 +1044,7 @@ void p9_client_destroy(struct p9_client *clnt)
 
 	p9_tag_cleanup(clnt);
 
+	kmem_cache_destroy(clnt->fcall_cache);
 	kfree(clnt);
 }
 EXPORT_SYMBOL(p9_client_destroy);