Commit b3c5fd30 authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: sort writes

Write requests are sorted in a red-black tree and submitted in sorted
order.

In theory, this sorting should be performed by the underlying disk
scheduler; in practice, however, the scheduler accepts and sorts only
a finite number of requests. To sort all outstanding requests,
dm-crypt must implement its own sorting.

The overhead associated with rbtree-based sorting is considered
negligible, so it is enabled unconditionally. Sorting can be
beneficial even on SSDs, because dispatching requests in order
promotes lower-latency I/O completion to the upper layers.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 0f5d8e6e
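
The core pattern of the patch, queue writes keyed by sector and drain them in ascending order, can be shown in isolation. What follows is a minimal, self-contained C sketch, not dm-crypt code: a plain unbalanced binary search tree stands in for the kernel's red-black tree, and the io_t type and helper names are illustrative only. It also shows why the drain loop detaches the minimum node each time (the patch's rb_first() plus rb_erase()) instead of walking with rb_next(): a node may be freed as soon as it is submitted.

/*
 * Minimal userspace sketch of the sorting pattern used by the patch.
 * A plain unbalanced BST stands in for the kernel's red-black tree;
 * io_t, insert_sorted() and pop_min() are illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct io {
	unsigned long long sector;	/* sort key, like dm_crypt_io::sector */
	struct io *left, *right;
} io_t;

/* Same walk as the patch's insertion loop: go left for smaller
 * sectors, right otherwise (equal sectors go right, as in the patch). */
static void insert_sorted(io_t **root, io_t *io)
{
	while (*root)
		root = io->sector < (*root)->sector ?
		       &(*root)->left : &(*root)->right;
	*root = io;
}

/* Detach the leftmost (smallest-sector) node.  Draining this way,
 * rather than iterating with a successor pointer, stays safe even if
 * each node is freed immediately after submission; this mirrors the
 * rb_first() + rb_erase() loop in dmcrypt_write(). */
static io_t *pop_min(io_t **root)
{
	io_t *io;

	while ((*root)->left)
		root = &(*root)->left;
	io = *root;
	*root = io->right;
	return io;
}

int main(void)
{
	unsigned long long sectors[] = { 4096, 8, 512, 64, 2048 };
	io_t *root = NULL;
	size_t i;

	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
		io_t *io = calloc(1, sizeof(*io));
		if (!io)
			return 1;
		io->sector = sectors[i];
		insert_sorted(&root, io);
	}
	while (root) {			/* "submit" in ascending order */
		io_t *io = pop_min(&root);
		printf("submit sector %llu\n", io->sector);
		free(io);
	}
	return 0;
}

Compiled and run, the sketch prints the sectors in ascending order (8, 64, 512, 2048, 4096) regardless of insertion order; the in-kernel rbtree gives the same ordering with O(log n) balanced insertion.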
@@ -22,6 +22,7 @@
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/rbtree.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
@@ -60,7 +61,7 @@ struct dm_crypt_io {
 	int error;
 	sector_t sector;
 
-	struct list_head list;
+	struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -134,7 +135,7 @@ struct crypt_config {
 
 	struct task_struct *write_thread;
 	wait_queue_head_t write_thread_wait;
-	struct list_head write_thread_list;
+	struct rb_root write_tree;
 
 	char *cipher;
 	char *cipher_string;
@@ -1169,11 +1170,15 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 	generic_make_request(clone);
 }
 
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
 static int dmcrypt_write(void *data)
 {
 	struct crypt_config *cc = data;
+	struct dm_crypt_io *io;
+
 	while (1) {
-		struct list_head local_list;
+		struct rb_root write_tree;
 		struct blk_plug plug;
 
 		DECLARE_WAITQUEUE(wait, current);
@@ -1181,7 +1186,7 @@ static int dmcrypt_write(void *data)
 		spin_lock_irq(&cc->write_thread_wait.lock);
 continue_locked:
 
-		if (!list_empty(&cc->write_thread_list))
+		if (!RB_EMPTY_ROOT(&cc->write_tree))
 			goto pop_from_list;
 
 		__set_current_state(TASK_INTERRUPTIBLE);
@@ -1203,20 +1208,22 @@ static int dmcrypt_write(void *data)
 			goto continue_locked;
 
 pop_from_list:
-		local_list = cc->write_thread_list;
-		local_list.next->prev = &local_list;
-		local_list.prev->next = &local_list;
-		INIT_LIST_HEAD(&cc->write_thread_list);
+		write_tree = cc->write_tree;
+		cc->write_tree = RB_ROOT;
 		spin_unlock_irq(&cc->write_thread_wait.lock);
 
+		BUG_ON(rb_parent(write_tree.rb_node));
+		/*
+		 * Note: we cannot walk the tree here with rb_next because
+		 * the structures may be freed when kcryptd_io_write is called.
+		 */
 		blk_start_plug(&plug);
 		do {
-			struct dm_crypt_io *io = container_of(local_list.next,
-						struct dm_crypt_io, list);
-			list_del(&io->list);
+			io = crypt_io_from_node(rb_first(&write_tree));
+			rb_erase(&io->rb_node, &write_tree);
 			kcryptd_io_write(io);
-		} while (!list_empty(&local_list));
+		} while (!RB_EMPTY_ROOT(&write_tree));
 		blk_finish_plug(&plug);
 	}
 	return 0;
@@ -1227,6 +1234,8 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	struct bio *clone = io->ctx.bio_out;
 	struct crypt_config *cc = io->cc;
 	unsigned long flags;
+	sector_t sector;
+	struct rb_node **rbp, *parent;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1246,7 +1255,19 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	}
 
 	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
-	list_add_tail(&io->list, &cc->write_thread_list);
+	rbp = &cc->write_tree.rb_node;
+	parent = NULL;
+	sector = io->sector;
+	while (*rbp) {
+		parent = *rbp;
+		if (sector < crypt_io_from_node(parent)->sector)
+			rbp = &(*rbp)->rb_left;
+		else
+			rbp = &(*rbp)->rb_right;
+	}
+	rb_link_node(&io->rb_node, parent, rbp);
+	rb_insert_color(&io->rb_node, &cc->write_tree);
 	wake_up_locked(&cc->write_thread_wait);
 	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
@@ -1836,7 +1857,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	init_waitqueue_head(&cc->write_thread_wait);
-	INIT_LIST_HEAD(&cc->write_thread_list);
+	cc->write_tree = RB_ROOT;
 
 	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
 	if (IS_ERR(cc->write_thread)) {