Commit 60c42a31 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-4.12/dm-fixes-3' of...

Merge tag 'for-4.12/dm-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a DM verity fix for a mode when no salt is used

 - a fix to DM to account for the possibility that PREFLUSH or FUA are
   used without the SYNC flag if the underlying storage doesn't have a
   volatile write-cache

 - a DM ioctl memory allocation flag fix to use __GFP_HIGH to allow
   emergency forward progress (by using memory reserves as last resort)

 - a small DM integrity cleanup to use kvmalloc() instead of duplicating
   the same allocation logic in a local helper

* tag 'for-4.12/dm-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: make flush bios explicitly sync
  dm ioctl: restore __GFP_HIGH in copy_params()
  dm integrity: use kvmalloc() instead of dm_integrity_kvmalloc()
  dm verity: fix no salt use case
parents 6f37fa43 ff0361b3
...@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) ...@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
{ {
struct dm_io_request io_req = { struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE, .bi_op = REQ_OP_WRITE,
.bi_op_flags = REQ_PREFLUSH, .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM, .mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL, .mem.ptr.addr = NULL,
.client = c->dm_io, .client = c->dm_io,
......
...@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi ...@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
for (i = 0; i < commit_sections; i++) for (i = 0; i < commit_sections; i++)
rw_section_mac(ic, commit_start + i, true); rw_section_mac(ic, commit_start + i, true);
} }
rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else { } else {
unsigned to_end; unsigned to_end;
io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
...@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) ...@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
blk_queue_max_integrity_segments(disk->queue, UINT_MAX); blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
} }
/* FIXME: use new kvmalloc */
/*
 * Allocate @size bytes for dm-integrity metadata, preferring physically
 * contiguous kmalloc() memory and falling back to __vmalloc() when the
 * request is too large or kmalloc() fails.
 *
 * @gfp is OR-ed into the base GFP_KERNEL flags (callers pass e.g.
 * __GFP_ZERO or 0).  Returns the allocation on success or NULL on
 * failure.
 */
static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
{
void *ptr = NULL;
/* Page-sized or smaller: a plain kmalloc() with default behavior. */
if (size <= PAGE_SIZE)
ptr = kmalloc(size, GFP_KERNEL | gfp);
/*
 * Larger requests: attempt kmalloc() opportunistically, suppressing
 * warnings and retries since the vmalloc fallback below can still
 * satisfy the allocation.
 */
if (!ptr && size <= KMALLOC_MAX_SIZE)
ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
/* Last resort: virtually contiguous memory via __vmalloc(). */
if (!ptr)
ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
return ptr;
}
static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
{ {
unsigned i; unsigned i;
...@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) ...@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
struct page_list *pl; struct page_list *pl;
unsigned i; unsigned i;
pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
if (!pl) if (!pl)
return NULL; return NULL;
...@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int ...@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
struct scatterlist **sl; struct scatterlist **sl;
unsigned i; unsigned i;
sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
if (!sl) if (!sl)
return NULL; return NULL;
...@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int ...@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
n_pages = (end_index - start_index + 1); n_pages = (end_index - start_index + 1);
s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
if (!s) { if (!s) {
dm_integrity_free_journal_scatterlist(ic, sl); dm_integrity_free_journal_scatterlist(ic, sl);
return NULL; return NULL;
...@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) ...@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad; goto bad;
} }
sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
if (!sg) { if (!sg) {
*error = "Unable to allocate sg list"; *error = "Unable to allocate sg list";
r = -ENOMEM; r = -ENOMEM;
...@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) ...@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
r = -ENOMEM; r = -ENOMEM;
goto bad; goto bad;
} }
ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
if (!ic->sk_requests) { if (!ic->sk_requests) {
*error = "Unable to allocate sk requests"; *error = "Unable to allocate sk requests";
r = -ENOMEM; r = -ENOMEM;
...@@ -2740,7 +2726,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) ...@@ -2740,7 +2726,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
r = -ENOMEM; r = -ENOMEM;
goto bad; goto bad;
} }
ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
if (!ic->journal_tree) { if (!ic->journal_tree) {
*error = "Could not allocate memory for journal tree"; *error = "Could not allocate memory for journal tree";
r = -ENOMEM; r = -ENOMEM;
......
...@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern ...@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
} }
/* /*
* Try to avoid low memory issues when a device is suspended. * Use __GFP_HIGH to avoid low memory issues when a device is
* suspended and the ioctl is needed to resume it.
* Use kmalloc() rather than vmalloc() when we can. * Use kmalloc() rather than vmalloc() when we can.
*/ */
dmi = NULL; dmi = NULL;
noio_flag = memalloc_noio_save(); noio_flag = memalloc_noio_save();
dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
memalloc_noio_restore(noio_flag); memalloc_noio_restore(noio_flag);
if (!dmi) { if (!dmi) {
......
...@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti) ...@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct mirror *m; struct mirror *m;
struct dm_io_request io_req = { struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE, .bi_op = REQ_OP_WRITE,
.bi_op_flags = REQ_PREFLUSH, .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM, .mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL, .mem.ptr.addr = NULL,
.client = ms->io_client, .client = ms->io_client,
......
...@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store, ...@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/* /*
* Commit exceptions to disk. * Commit exceptions to disk.
*/ */
if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) if (ps->valid && area_io(ps, REQ_OP_WRITE,
REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
ps->valid = 0; ps->valid = 0;
/* /*
......
...@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, ...@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
return r; return r;
} }
if (likely(v->version >= 1)) if (likely(v->salt_size && (v->version >= 1)))
r = verity_hash_update(v, req, v->salt, v->salt_size, res); r = verity_hash_update(v, req, v->salt, v->salt_size, res);
return r; return r;
...@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, ...@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
{ {
int r; int r;
if (unlikely(!v->version)) { if (unlikely(v->salt_size && (!v->version))) {
r = verity_hash_update(v, req, v->salt, v->salt_size, res); r = verity_hash_update(v, req, v->salt, v->salt_size, res);
if (r < 0) { if (r < 0) {
......
...@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
bio_init(&md->flush_bio, NULL, 0); bio_init(&md->flush_bio, NULL, 0);
md->flush_bio.bi_bdev = md->bdev; md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
dm_stats_init(&md->stats); dm_stats_init(&md->stats);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment