Commit 6e8267f5 authored by Andi Kleen, committed by root

direct-io: use a slab cache for struct dio

A direct slab call is slightly faster than kmalloc and can be better cached
per CPU. It also avoids rounding to the next kmalloc slab.

In addition this enforces cache line alignment for struct dio to avoid
any false sharing.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 0dc2bc49
...@@ -140,7 +140,9 @@ struct dio { ...@@ -140,7 +140,9 @@ struct dio {
* wish that they not be zeroed. * wish that they not be zeroed.
*/ */
struct page *pages[DIO_PAGES]; /* page buffer */ struct page *pages[DIO_PAGES]; /* page buffer */
}; } ____cacheline_aligned_in_smp;
static struct kmem_cache *dio_cache __read_mostly;
static void __inode_dio_wait(struct inode *inode) static void __inode_dio_wait(struct inode *inode)
{ {
...@@ -330,7 +332,7 @@ static void dio_bio_end_aio(struct bio *bio, int error) ...@@ -330,7 +332,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
if (remaining == 0) { if (remaining == 0) {
dio_complete(dio, dio->iocb->ki_pos, 0, true); dio_complete(dio, dio->iocb->ki_pos, 0, true);
kfree(dio); kmem_cache_free(dio_cache, dio);
} }
} }
...@@ -1180,7 +1182,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, ...@@ -1180,7 +1182,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
if (ret2 == 0) { if (ret2 == 0) {
ret = dio_complete(dio, offset, ret, false); ret = dio_complete(dio, offset, ret, false);
kfree(dio); kmem_cache_free(dio_cache, dio);
} else } else
BUG_ON(ret != -EIOCBQUEUED); BUG_ON(ret != -EIOCBQUEUED);
...@@ -1256,7 +1258,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ...@@ -1256,7 +1258,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (rw == READ && end == offset) if (rw == READ && end == offset)
return 0; return 0;
dio = kmalloc(sizeof(*dio), GFP_KERNEL); dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
retval = -ENOMEM; retval = -ENOMEM;
if (!dio) if (!dio)
goto out; goto out;
...@@ -1280,7 +1282,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ...@@ -1280,7 +1282,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
end - 1); end - 1);
if (retval) { if (retval) {
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
kfree(dio); kmem_cache_free(dio_cache, dio);
goto out; goto out;
} }
} }
...@@ -1308,3 +1310,10 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ...@@ -1308,3 +1310,10 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
return retval; return retval;
} }
EXPORT_SYMBOL(__blockdev_direct_IO); EXPORT_SYMBOL(__blockdev_direct_IO);
/*
 * Boot-time initializer: create the dedicated slab cache used for
 * struct dio allocations (replacing kmalloc, per this commit).
 *
 * KMEM_CACHE() derives the cache's name, object size and alignment
 * from the struct dio type itself; with struct dio now declared
 * ____cacheline_aligned_in_smp, objects are cache-line aligned to
 * avoid false sharing.  SLAB_PANIC makes a cache-creation failure
 * panic the kernel instead of returning NULL, which is why the
 * result is not checked here.
 */
static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment