Commit d52f847a authored by Sage Weil

ceph: rewrite msgpool using mempool_t

Since we don't need to maintain large pools of messages, we can just
use the standard mempool_t.  We maintain a msgpool 'wrapper' because we
need the mempool_t* in the alloc function, and mempool gives us only
pool_data.
Signed-off-by: Sage Weil <sage@newdream.net>
parent 640ef79d
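The constraint the commit message describes is visible in the mempool callback signatures: `mempool_alloc_t` and `mempool_free_t` receive only a gfp mask or element plus the opaque `pool_data` pointer registered at `mempool_create()` time, so any per-pool parameter (here, `front_len`) has to travel inside a wrapper struct passed as `pool_data`. Below is a minimal standalone sketch of that pattern; the names `sized_pool` and `elem_size` are illustrative and not part of this patch.

```c
#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical wrapper: mempool only hands our callbacks the opaque
 * pool_data pointer, so a per-pool parameter (here, a buffer size)
 * must live in a struct that we register as pool_data. */
struct sized_pool {
	mempool_t *pool;
	size_t elem_size;
};

static void *sized_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct sized_pool *sp = pool_data;	/* recover the wrapper */

	return kmalloc(sp->elem_size, gfp_mask);
}

static void sized_free(void *element, void *pool_data)
{
	kfree(element);
}

static int sized_pool_init(struct sized_pool *sp, int min_nr, size_t size)
{
	sp->elem_size = size;
	/* pass the wrapper itself as pool_data so sized_alloc can see it */
	sp->pool = mempool_create(min_nr, sized_alloc, sized_free, sp);
	return sp->pool ? 0 : -ENOMEM;
}

static void sized_pool_destroy(struct sized_pool *sp)
{
	mempool_destroy(sp->pool);	/* releases the reserve via sized_free */
}
```

The patch below applies the same wiring: `struct ceph_msgpool` itself is registered as `pool_data`, so `alloc_fn` can read `pool->front_len`.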
fs/ceph/msgpool.c

```diff
@@ -7,106 +7,43 @@
 #include "msgpool.h"
 
-/*
- * We use msg pools to preallocate memory for messages we expect to
- * receive over the wire, to avoid getting ourselves into OOM
- * conditions at unexpected times.  We take use a few different
- * strategies:
- *
- * - for request/response type interactions, we preallocate the
- *   memory needed for the response when we generate the request.
- *
- * - for messages we can receive at any time from the MDS, we preallocate
- *   a pool of messages we can re-use.
- *
- * - for writeback, we preallocate some number of messages to use for
- *   requests and their replies, so that we always make forward
- *   progress.
- *
- * The msgpool behaves like a mempool_t, but keeps preallocated
- * ceph_msgs strung together on a list_head instead of using a pointer
- * vector.  This avoids vector reallocation when we adjust the number
- * of preallocated items (which happens frequently).
- */
+static void *alloc_fn(gfp_t gfp_mask, void *arg)
+{
+	struct ceph_msgpool *pool = arg;
+	struct ceph_msg *m;
 
-/*
- * Allocate or release as necessary to meet our target pool size.
- */
-static int __fill_msgpool(struct ceph_msgpool *pool)
-{
-	struct ceph_msg *msg;
-
-	while (pool->num < pool->min) {
-		dout("fill_msgpool %p %d/%d allocating\n", pool, pool->num,
-		     pool->min);
-		spin_unlock(&pool->lock);
-		msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
-		spin_lock(&pool->lock);
-		if (IS_ERR(msg))
-			return PTR_ERR(msg);
-		msg->pool = pool;
-		list_add(&msg->list_head, &pool->msgs);
-		pool->num++;
-	}
-	while (pool->num > pool->min) {
-		msg = list_first_entry(&pool->msgs, struct ceph_msg, list_head);
-		dout("fill_msgpool %p %d/%d releasing %p\n", pool, pool->num,
-		     pool->min, msg);
-		list_del_init(&msg->list_head);
-		pool->num--;
-		ceph_msg_kfree(msg);
-	}
-	return 0;
+	m = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
+	if (IS_ERR(m))
+		return NULL;
+	return m;
+}
+
+static void free_fn(void *element, void *arg)
+{
+	ceph_msg_put(element);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
-		      int front_len, int min, bool blocking)
+		      int front_len, int size, bool blocking)
 {
-	int ret;
-
-	dout("msgpool_init %p front_len %d min %d\n", pool, front_len, min);
-	spin_lock_init(&pool->lock);
 	pool->front_len = front_len;
-	INIT_LIST_HEAD(&pool->msgs);
-	pool->num = 0;
-	pool->min = min;
-	pool->blocking = blocking;
-	init_waitqueue_head(&pool->wait);
-	spin_lock(&pool->lock);
-	ret = __fill_msgpool(pool);
-	spin_unlock(&pool->lock);
-	return ret;
+	pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+	if (!pool->pool)
+		return -ENOMEM;
+	return 0;
 }
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
-	dout("msgpool_destroy %p\n", pool);
-	spin_lock(&pool->lock);
-	pool->min = 0;
-	__fill_msgpool(pool);
-	spin_unlock(&pool->lock);
-}
-
-int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta)
-{
-	int ret;
-
-	spin_lock(&pool->lock);
-	dout("msgpool_resv %p delta %d\n", pool, delta);
-	pool->min += delta;
-	ret = __fill_msgpool(pool);
-	spin_unlock(&pool->lock);
-	return ret;
+	mempool_destroy(pool->pool);
 }
 
-struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
+				  int front_len)
 {
-	wait_queue_t wait;
-	struct ceph_msg *msg;
-
-	if (front_len && front_len > pool->front_len) {
+	if (front_len > pool->front_len) {
+		struct ceph_msg *msg;
+
 		pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
 		       pool, front_len, pool->front_len);
 		WARN_ON(1);
@@ -115,72 +52,17 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
 		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
 		if (!IS_ERR(msg))
 			return msg;
+		return NULL;
 	}
 
-	if (!front_len)
-		front_len = pool->front_len;
-
-	if (pool->blocking) {
-		/* mempool_t behavior; first try to alloc */
-		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
-		if (!IS_ERR(msg))
-			return msg;
-	}
-
-	while (1) {
-		spin_lock(&pool->lock);
-		if (likely(pool->num)) {
-			msg = list_entry(pool->msgs.next, struct ceph_msg,
-					 list_head);
-			list_del_init(&msg->list_head);
-			pool->num--;
-			dout("msgpool_get %p got %p, now %d/%d\n", pool, msg,
-			     pool->num, pool->min);
-			spin_unlock(&pool->lock);
-			return msg;
-		}
-		pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num,
-		       pool->min, pool->blocking ? "waiting" : "may fail");
-		spin_unlock(&pool->lock);
-
-		if (!pool->blocking) {
-			WARN_ON(1);
-
-			/* maybe we can allocate it now? */
-			msg = ceph_msg_new(0, front_len, 0, 0, NULL);
-			if (!IS_ERR(msg))
-				return msg;
-
-			pr_err("msgpool_get %p empty + alloc failed\n", pool);
-			return ERR_PTR(-ENOMEM);
-		}
-
-		init_wait(&wait);
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-		schedule();
-		finish_wait(&pool->wait, &wait);
-	}
+	return mempool_alloc(pool->pool, GFP_NOFS);
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
-	spin_lock(&pool->lock);
-	if (pool->num < pool->min) {
-		/* reset msg front_len; user may have changed it */
-		msg->front.iov_len = pool->front_len;
-		msg->hdr.front_len = cpu_to_le32(pool->front_len);
+	/* reset msg front_len; user may have changed it */
+	msg->front.iov_len = pool->front_len;
+	msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
-		kref_init(&msg->kref);  /* retake a single ref */
-		list_add(&msg->list_head, &pool->msgs);
-		pool->num++;
-		dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg,
-		     pool->num, pool->min);
-		spin_unlock(&pool->lock);
-		wake_up(&pool->wait);
-	} else {
-		dout("msgpool_put %p drop %p, at %d/%d\n", pool, msg,
-		     pool->num, pool->min);
-		spin_unlock(&pool->lock);
-		ceph_msg_kfree(msg);
-	}
+	kref_init(&msg->kref);  /* retake single ref */
+	mempool_free(msg, pool->pool);
 }
```
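With the hand-rolled list and waitqueue machinery gone, a caller of the rewritten API follows roughly the lifecycle below. This is a hypothetical sketch: the sizes, the wrapper function, and the error handling are illustrative assumptions, not code from the patch.

```c
#include "msgpool.h"

/* Hypothetical caller: values and flow are illustrative only. */
static int msgpool_example(void)
{
	struct ceph_msgpool pool;
	struct ceph_msg *msg;
	int err;

	/* keep at least 8 messages with 4096-byte fronts preallocated */
	err = ceph_msgpool_init(&pool, 4096, 8, true);
	if (err)
		return err;

	/* front_len <= pool->front_len, so this falls through to
	 * mempool_alloc(), which may sleep until an element is free */
	msg = ceph_msgpool_get(&pool, 1024);
	if (!msg) {
		ceph_msgpool_destroy(&pool);
		return -ENOMEM;
	}

	/* ... fill msg->front.iov_base and send ... */

	ceph_msgpool_put(&pool, msg);	/* resets front_len, re-pools msg */
	ceph_msgpool_destroy(&pool);
	return 0;
}
```

One side effect worth noting: the `blocking` flag survives in the `ceph_msgpool_init()` signature but appears unused in this hunk, since `mempool_alloc()` with `GFP_NOFS` can already sleep until an element is returned to the pool.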
fs/ceph/msgpool.h

```diff
@@ -1,6 +1,7 @@
 #ifndef _FS_CEPH_MSGPOOL
 #define _FS_CEPH_MSGPOOL
 
+#include <linux/mempool.h>
 #include "messenger.h"
 
 /*
@@ -8,18 +9,13 @@
  * avoid unexpected OOM conditions.
  */
 struct ceph_msgpool {
-	spinlock_t lock;
+	mempool_t *pool;
 	int front_len;          /* preallocated payload size */
-	struct list_head msgs;  /* msgs in the pool; each has 1 ref */
-	int num, min;           /* cur, min # msgs in the pool */
-	bool blocking;
-	wait_queue_head_t wait;
 };
 
 extern int ceph_msgpool_init(struct ceph_msgpool *pool,
 			     int front_len, int size, bool blocking);
 extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta);
 extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
 					 int front_len);
 extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
```