Commit 3322be32 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] remove the buffer_head mempool

mempools have the wrong semantics for use by buffer_heads.  The problem
scenario:

- Process A calls mempool_alloc(), asking for a buffer_head.  The slab
  allocation fails and the pool's reserve is empty, so A goes to sleep on the
  pool's waitqueue.

- While process A sleeps, process B frees up a ton of memory.

That's it.  There is no longer any memory pressure, so nobody frees any
buffer_heads, so process A does not get woken up.  I managed to trigger this
in some testing recently.
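
To make the stall concrete, here is a deliberately simplified sketch of the
allocate/free interaction.  The names (sketch_pool, sketch_pool_alloc,
sketch_pool_free) are made up, and the locking and gfp handling of the real
mm/mempool.c are omitted; the point is only that the sole wakeup path for a
sleeping allocator is an element being returned to the pool:

#include <linux/slab.h>
#include <linux/wait.h>

#define SKETCH_POOL_MIN	16

struct sketch_pool {
	wait_queue_head_t wait;			/* allocators sleep here */
	int curr_nr;				/* elements held in reserve */
	void *elements[SKETCH_POOL_MIN];
	kmem_cache_t *cachep;			/* e.g. bh_cachep */
};

static void *sketch_pool_alloc(struct sketch_pool *pool, int gfp_mask)
{
	void *element;

	for (;;) {
		/* First try the slab allocator without sleeping. */
		element = kmem_cache_alloc(pool->cachep, gfp_mask & ~__GFP_WAIT);
		if (element)
			return element;

		/* Then fall back to the reserved elements. */
		if (pool->curr_nr)
			return pool->elements[--pool->curr_nr];

		/*
		 * Process A sleeps here.  The only wakeup comes from
		 * sketch_pool_free() below, i.e. from someone returning an
		 * element.  Process B freeing a ton of other memory ends the
		 * memory pressure but never touches pool->wait, so A stays
		 * asleep even though kmem_cache_alloc() would now succeed.
		 */
		wait_event(pool->wait, pool->curr_nr > 0);
	}
}

static void sketch_pool_free(struct sketch_pool *pool, void *element)
{
	if (pool->curr_nr < SKETCH_POOL_MIN) {
		pool->elements[pool->curr_nr++] = element;
		wake_up(&pool->wait);	/* the only path that wakes allocators */
	} else {
		kmem_cache_free(pool->cachep, element);
	}
}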

One approach would be to use a schedule_timeout(2) in mempool_alloc().
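
That would amount to replacing the unconditional sleep in the loop above with a
short timed sleep, so the allocator retries the slab allocator every couple of
ticks even when nothing is returned to the pool.  Roughly (again only a sketch,
dropped in place of the wait_event() line in the fragment above):

		{
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&pool->wait, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!pool->curr_nr)
				schedule_timeout(2);	/* wake after ~2 ticks even if nothing was freed */
			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&pool->wait, &wait);
		}
		/* fall through: the for (;;) loop retries kmem_cache_alloc() */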

Anyway, the importance of buffer_head allocation was lessened when swapout
stopped using buffer_heads, so let's just drop the mempool for now.
parent e1f3b1fc
@@ -31,7 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
-#include <linux/mempool.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
@@ -2791,7 +2790,6 @@ asmlinkage long sys_bdflush(int func, long data)
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
-static mempool_t *bh_mempool;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2825,7 +2823,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(void)
 {
-	struct buffer_head *ret = mempool_alloc(bh_mempool, GFP_NOFS);
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, GFP_NOFS);
 	if (ret) {
 		preempt_disable();
 		__get_cpu_var(bh_accounting).nr++;
@@ -2839,7 +2837,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
 void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
-	mempool_free(bh, bh_mempool);
+	kmem_cache_free(bh_cachep, bh);
 	preempt_disable();
 	__get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
@@ -2847,7 +2845,8 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			SLAB_CTOR_CONSTRUCTOR) {
@@ -2858,19 +2857,6 @@ static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 	}
 }
 
-static void *bh_mempool_alloc(int gfp_mask, void *pool_data)
-{
-	return kmem_cache_alloc(bh_cachep, gfp_mask);
-}
-
-static void bh_mempool_free(void *element, void *pool_data)
-{
-	return kmem_cache_free(bh_cachep, element);
-}
-
-#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
-#define MAX_UNUSED_BUFFERS NR_RESERVED+20
-
 static void buffer_init_cpu(int cpu)
 {
 	struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
@@ -2907,8 +2893,6 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
 			0, init_buffer_head, NULL);
-	bh_mempool = mempool_create(MAX_UNUSED_BUFFERS, bh_mempool_alloc,
-				bh_mempool_free, NULL);
 	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
 		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);