Commit 400a0bef authored by Mikulas Patocka, committed by Mike Snitzer

dm bufio: add sector start offset to dm-bufio interface

Introduce dm_bufio_set_sector_offset() interface to allow setting a
sector offset for a dm-bufio client.  This is a prereq for the DM
integrity target.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 9b4b5a79
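For context, a minimal usage sketch of the new interface (not part of this commit; the dm_bufio_client_create() arguments and the one-sector offset below are illustrative assumptions):

        /*
         * Illustrative sketch only: create a bufio client whose block 0
         * starts one sector past the beginning of the device, e.g. to
         * skip a hypothetical 512-byte superblock.
         */
        struct dm_bufio_client *c;

        c = dm_bufio_client_create(bdev, 4096 /* block_size */,
                                   1 /* reserved_buffers */, 0 /* aux_size */,
                                   NULL /* alloc_callback */,
                                   NULL /* write_callback */);
        if (IS_ERR(c))
                return PTR_ERR(c);

        /* Per the new header comment: call only while no I/O is in progress. */
        dm_bufio_set_sector_offset(c, 1);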
drivers/md/dm-bufio.c

@@ -110,6 +110,8 @@ struct dm_bufio_client {
         struct rb_root buffer_tree;
         wait_queue_head_t free_buffer_wait;
 
+        sector_t start;
+
         int async_write_error;
 
         struct list_head client_list;
@@ -557,8 +559,8 @@ static void dmio_complete(unsigned long error, void *context)
         b->bio.bi_end_io(&b->bio);
 }
 
-static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
-                     bio_end_io_t *end_io)
+static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+                     unsigned n_sectors, bio_end_io_t *end_io)
 {
         int r;
         struct dm_io_request io_req = {
@@ -570,8 +572,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
         };
         struct dm_io_region region = {
                 .bdev = b->c->bdev,
-                .sector = block << b->c->sectors_per_block_bits,
-                .count = b->c->block_size >> SECTOR_SHIFT,
+                .sector = sector,
+                .count = n_sectors,
         };
 
         if (b->data_mode != DATA_MODE_VMALLOC) {
@@ -606,14 +608,14 @@ static void inline_endio(struct bio *bio)
         end_fn(bio);
 }
 
-static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
-                           bio_end_io_t *end_io)
+static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
+                           unsigned n_sectors, bio_end_io_t *end_io)
 {
         char *ptr;
         int len;
 
         bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
-        b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
+        b->bio.bi_iter.bi_sector = sector;
         b->bio.bi_bdev = b->c->bdev;
         b->bio.bi_end_io = inline_endio;
         /*
@@ -628,7 +630,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
          * If len < PAGE_SIZE the buffer doesn't cross page boundary.
          */
         ptr = b->data;
-        len = b->c->block_size;
+        len = n_sectors << SECTOR_SHIFT;
 
         if (len >= PAGE_SIZE)
                 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
@@ -640,7 +642,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                                   len < PAGE_SIZE ? len : PAGE_SIZE,
                                   offset_in_page(ptr))) {
                         BUG_ON(b->c->block_size <= PAGE_SIZE);
-                        use_dmio(b, rw, block, end_io);
+                        use_dmio(b, rw, sector, n_sectors, end_io);
                         return;
                 }
 
@@ -651,17 +653,22 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
         submit_bio(&b->bio);
 }
 
-static void submit_io(struct dm_buffer *b, int rw, sector_t block,
-                      bio_end_io_t *end_io)
+static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 {
+        unsigned n_sectors;
+        sector_t sector;
+
         if (rw == WRITE && b->c->write_callback)
                 b->c->write_callback(b);
 
-        if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
+        sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
+        n_sectors = 1 << b->c->sectors_per_block_bits;
+
+        if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
             b->data_mode != DATA_MODE_VMALLOC)
-                use_inline_bio(b, rw, block, end_io);
+                use_inline_bio(b, rw, sector, n_sectors, end_io);
         else
-                use_dmio(b, rw, block, end_io);
+                use_dmio(b, rw, sector, n_sectors, end_io);
 }
 
 /*----------------------------------------------------------------
@@ -713,7 +720,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
         wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
         if (!write_list)
-                submit_io(b, WRITE, b->block, write_endio);
+                submit_io(b, WRITE, write_endio);
         else
                 list_add_tail(&b->write_list, write_list);
 }
@@ -726,7 +733,7 @@ static void __flush_write_list(struct list_head *write_list)
                 struct dm_buffer *b =
                         list_entry(write_list->next, struct dm_buffer, write_list);
                 list_del(&b->write_list);
-                submit_io(b, WRITE, b->block, write_endio);
+                submit_io(b, WRITE, write_endio);
                 cond_resched();
         }
         blk_finish_plug(&plug);
@@ -1094,7 +1101,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
                 return NULL;
 
         if (need_submit)
-                submit_io(b, READ, b->block, read_endio);
+                submit_io(b, READ, read_endio);
 
         wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
@@ -1164,7 +1171,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
                         dm_bufio_unlock(c);
 
                         if (need_submit)
-                                submit_io(b, READ, b->block, read_endio);
+                                submit_io(b, READ, read_endio);
                         dm_bufio_release(b);
 
                         cond_resched();
@@ -1405,7 +1412,7 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
                         old_block = b->block;
                         __unlink_buffer(b);
                         __link_buffer(b, new_block, b->list_mode);
-                        submit_io(b, WRITE, new_block, write_endio);
+                        submit_io(b, WRITE, write_endio);
                         wait_on_bit_io(&b->state, B_WRITING,
                                        TASK_UNINTERRUPTIBLE);
                         __unlink_buffer(b);
@@ -1762,6 +1769,12 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
 
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+{
+        c->start = start;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
+
 static unsigned get_max_age_hz(void)
 {
         unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
drivers/md/dm-bufio.h

@@ -31,6 +31,13 @@ dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
  */
 void dm_bufio_client_destroy(struct dm_bufio_client *c);
 
+/*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
 /*
  * WARNING: to avoid deadlocks, these conditions are observed:
  *
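As a standalone illustration of the arithmetic submit_io() now performs with the client offset (not kernel code; the block size, block number, and 8-sector offset are made-up example values):

        #include <stdio.h>

        typedef unsigned long long sector_t;    /* stand-in for the kernel type */

        int main(void)
        {
                unsigned sectors_per_block_bits = 3;    /* 4096-byte blocks, 512-byte sectors */
                sector_t start = 8;                     /* set via dm_bufio_set_sector_offset() */
                sector_t block = 5;                     /* bufio block number */

                /* Mirrors the new computation in submit_io(). */
                sector_t sector = (block << sectors_per_block_bits) + start;
                unsigned n_sectors = 1u << sectors_per_block_bits;

                /* Block 5 maps to device sectors 48..55 once the offset is applied. */
                printf("sector=%llu n_sectors=%u\n", sector, n_sectors);
                return 0;
        }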