Commit c183e170 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.16/dm-changes' of...

Merge tag 'for-5.16/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Add DM core support for emitting audit events through the audit
   subsystem. Also enhance both the integrity and crypt targets to emit
   events via dm-audit.

 - Various other simple code improvements and cleanups.

* tag 'for-5.16/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm table: log table creation error code
  dm: make workqueue names device-specific
  dm writecache: Make use of the helper macro kthread_run()
  dm crypt: Make use of the helper macro kthread_run()
  dm verity: use bvec_kmap_local in verity_for_bv_block
  dm log writes: use memcpy_from_bvec in log_writes_map
  dm integrity: use bvec_kmap_local in __journal_read_write
  dm integrity: use bvec_kmap_local in integrity_metadata
  dm: add add_disk() error handling
  dm: Remove redundant flush_workqueue() calls
  dm crypt: log aead integrity violations to audit subsystem
  dm integrity: log audit events for dm-integrity target
  dm: introduce audit event module for device mapper
parents 37259498 7552750d
...@@ -610,6 +610,7 @@ config DM_INTEGRITY ...@@ -610,6 +610,7 @@ config DM_INTEGRITY
select CRYPTO select CRYPTO
select CRYPTO_SKCIPHER select CRYPTO_SKCIPHER
select ASYNC_XOR select ASYNC_XOR
select DM_AUDIT if AUDIT
help help
This device-mapper target emulates a block device that has This device-mapper target emulates a block device that has
additional per-sector tags that can be used for storing additional per-sector tags that can be used for storing
...@@ -642,4 +643,13 @@ config DM_ZONED ...@@ -642,4 +643,13 @@ config DM_ZONED
If unsure, say N. If unsure, say N.
config DM_AUDIT
bool "DM audit events"
depends on AUDIT
help
Generate audit events for device-mapper.
Enables audit logging of several security relevant events in the
particular device-mapper targets, especially the integrity target.
endif # MD endif # MD
...@@ -107,3 +107,7 @@ endif ...@@ -107,3 +107,7 @@ endif
ifeq ($(CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG),y) ifeq ($(CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG),y)
dm-verity-objs += dm-verity-verify-sig.o dm-verity-objs += dm-verity-verify-sig.o
endif endif
ifeq ($(CONFIG_DM_AUDIT),y)
dm-mod-objs += dm-audit.o
endif
// SPDX-License-Identifier: GPL-2.0
/*
* Creating audit records for mapped devices.
*
* Copyright (C) 2021 Fraunhofer AISEC. All rights reserved.
*
* Authors: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
*/
#include <linux/audit.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include "dm-audit.h"
#include "dm-core.h"
/*
 * Open an audit record of @audit_type, prefixed with the device-mapper
 * module name and the operation being audited.  Returns NULL when
 * auditing is disabled or no buffer could be allocated; callers must
 * check for NULL before formatting further fields.
 */
static struct audit_buffer *dm_audit_log_start(int audit_type,
					       const char *dm_msg_prefix,
					       const char *op)
{
	struct audit_buffer *ab = NULL;

	if (audit_enabled != AUDIT_OFF)
		ab = audit_log_start(audit_context(), GFP_KERNEL, audit_type);

	if (ab)
		audit_log_format(ab, "module=%s op=%s", dm_msg_prefix, op);

	return ab;
}
/*
 * Emit an audit event for a device-mapper target.
 *
 * AUDIT_DM_CTRL records (constructor/destructor) carry the task info and
 * the target's error message ("success" when @result is non-zero);
 * AUDIT_DM_EVENT records carry the device numbers with an unknown sector.
 * Any other @audit_type is an unintended use and is silently ignored.
 */
void dm_audit_log_ti(int audit_type, const char *dm_msg_prefix, const char *op,
		     struct dm_target *ti, int result)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int dev_major = dm_disk(md)->major;
	int dev_minor = dm_disk(md)->first_minor;
	struct audit_buffer *ab;

	if (audit_type != AUDIT_DM_CTRL && audit_type != AUDIT_DM_EVENT)
		return; /* unintended use */

	ab = dm_audit_log_start(audit_type, dm_msg_prefix, op);
	if (unlikely(!ab))
		return;

	if (audit_type == AUDIT_DM_CTRL) {
		audit_log_task_info(ab);
		audit_log_format(ab, " dev=%d:%d error_msg='%s'", dev_major,
				 dev_minor, !result ? ti->error : "success");
	} else {
		audit_log_format(ab, " dev=%d:%d sector=?", dev_major,
				 dev_minor);
	}

	audit_log_format(ab, " res=%d", result);
	audit_log_end(ab);
}
EXPORT_SYMBOL_GPL(dm_audit_log_ti);
/*
 * Emit an AUDIT_DM_EVENT record for a bio, logging the backing device
 * numbers, the affected sector and the result code.
 */
void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
		      struct bio *bio, sector_t sector, int result)
{
	dev_t dev = bio->bi_bdev->bd_dev;
	struct audit_buffer *ab;

	ab = dm_audit_log_start(AUDIT_DM_EVENT, dm_msg_prefix, op);
	if (!ab)
		return;

	audit_log_format(ab, " dev=%d:%d sector=%llu res=%d",
			 MAJOR(dev), MINOR(dev), sector, result);
	audit_log_end(ab);
}
EXPORT_SYMBOL_GPL(dm_audit_log_bio);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Creating audit records for mapped devices.
*
* Copyright (C) 2021 Fraunhofer AISEC. All rights reserved.
*
* Authors: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
*/
#ifndef DM_AUDIT_H
#define DM_AUDIT_H
#include <linux/device-mapper.h>
#include <linux/audit.h>
#ifdef CONFIG_DM_AUDIT
void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
struct bio *bio, sector_t sector, int result);
/*
* dm_audit_log_ti() is not intended to be used directly in dm modules,
* the wrapper functions below should be called by dm modules instead.
*/
void dm_audit_log_ti(int audit_type, const char *dm_msg_prefix, const char *op,
struct dm_target *ti, int result);
/* Log a target constructor control event; result 0 = failure, non-zero = success. */
static inline void dm_audit_log_ctr(const char *dm_msg_prefix,
struct dm_target *ti, int result)
{
dm_audit_log_ti(AUDIT_DM_CTRL, dm_msg_prefix, "ctr", ti, result);
}
/* Log a target destructor control event; result 0 = failure, non-zero = success. */
static inline void dm_audit_log_dtr(const char *dm_msg_prefix,
struct dm_target *ti, int result)
{
dm_audit_log_ti(AUDIT_DM_CTRL, dm_msg_prefix, "dtr", ti, result);
}
/* Log a runtime (AUDIT_DM_EVENT) event for a target, with caller-chosen op. */
static inline void dm_audit_log_target(const char *dm_msg_prefix, const char *op,
struct dm_target *ti, int result)
{
dm_audit_log_ti(AUDIT_DM_EVENT, dm_msg_prefix, op, ti, result);
}
#else
/* No-op stub when CONFIG_DM_AUDIT is disabled. */
static inline void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
struct bio *bio, sector_t sector,
int result)
{
}
/* No-op stub when CONFIG_DM_AUDIT is disabled. */
static inline void dm_audit_log_target(const char *dm_msg_prefix,
const char *op, struct dm_target *ti,
int result)
{
}
/* No-op stub when CONFIG_DM_AUDIT is disabled. */
static inline void dm_audit_log_ctr(const char *dm_msg_prefix,
struct dm_target *ti, int result)
{
}
/* No-op stub when CONFIG_DM_AUDIT is disabled. */
static inline void dm_audit_log_dtr(const char *dm_msg_prefix,
struct dm_target *ti, int result)
{
}
#endif
#endif
...@@ -2082,7 +2082,6 @@ static void __exit dm_bufio_exit(void) ...@@ -2082,7 +2082,6 @@ static void __exit dm_bufio_exit(void)
int bug = 0; int bug = 0;
cancel_delayed_work_sync(&dm_bufio_cleanup_old_work); cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
flush_workqueue(dm_bufio_wq);
destroy_workqueue(dm_bufio_wq); destroy_workqueue(dm_bufio_wq);
if (dm_bufio_client_count) { if (dm_bufio_client_count) {
......
...@@ -42,6 +42,8 @@ ...@@ -42,6 +42,8 @@
#include <linux/device-mapper.h> #include <linux/device-mapper.h>
#include "dm-audit.h"
#define DM_MSG_PREFIX "crypt" #define DM_MSG_PREFIX "crypt"
/* /*
...@@ -1363,8 +1365,12 @@ static int crypt_convert_block_aead(struct crypt_config *cc, ...@@ -1363,8 +1365,12 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) { if (r == -EBADMSG) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), sector_t s = le64_to_cpu(*sector);
(unsigned long long)le64_to_cpu(*sector));
DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
bio_devname(ctx->bio_in, b), s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
} }
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
...@@ -2174,8 +2180,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, ...@@ -2174,8 +2180,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
if (error == -EBADMSG) { if (error == -EBADMSG) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
bio_devname(ctx->bio_in, b), s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
io->error = BLK_STS_PROTECTION; io->error = BLK_STS_PROTECTION;
} else if (error < 0) } else if (error < 0)
io->error = BLK_STS_IOERR; io->error = BLK_STS_IOERR;
...@@ -2735,6 +2745,8 @@ static void crypt_dtr(struct dm_target *ti) ...@@ -2735,6 +2745,8 @@ static void crypt_dtr(struct dm_target *ti)
dm_crypt_clients_n--; dm_crypt_clients_n--;
crypt_calculate_pages_per_client(); crypt_calculate_pages_per_client();
spin_unlock(&dm_crypt_clients_lock); spin_unlock(&dm_crypt_clients_lock);
dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
} }
static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
...@@ -3351,21 +3363,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3351,21 +3363,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&cc->write_thread_lock); spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT; cc->write_tree = RB_ROOT;
cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname); cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
if (IS_ERR(cc->write_thread)) { if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread); ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL; cc->write_thread = NULL;
ti->error = "Couldn't spawn write thread"; ti->error = "Couldn't spawn write thread";
goto bad; goto bad;
} }
wake_up_process(cc->write_thread);
ti->num_flush_bios = 1; ti->num_flush_bios = 1;
ti->limit_swap_bios = true; ti->limit_swap_bios = true;
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0; return 0;
bad: bad:
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
crypt_dtr(ti); crypt_dtr(ti);
return ret; return ret;
} }
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include <linux/async_tx.h> #include <linux/async_tx.h>
#include <linux/dm-bufio.h> #include <linux/dm-bufio.h>
#include "dm-audit.h"
#define DM_MSG_PREFIX "integrity" #define DM_MSG_PREFIX "integrity"
#define DEFAULT_INTERLEAVE_SECTORS 32768 #define DEFAULT_INTERLEAVE_SECTORS 32768
...@@ -539,6 +541,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr) ...@@ -539,6 +541,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
} }
if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) { if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
dm_integrity_io_error(ic, "superblock mac", -EILSEQ); dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
return -EILSEQ; return -EILSEQ;
} }
} }
...@@ -876,8 +879,10 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) ...@@ -876,8 +879,10 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
if (likely(wr)) if (likely(wr))
memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
else { else {
if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
dm_integrity_io_error(ic, "journal mac", -EILSEQ); dm_integrity_io_error(ic, "journal mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
}
} }
} }
} }
...@@ -1765,7 +1770,7 @@ static void integrity_metadata(struct work_struct *w) ...@@ -1765,7 +1770,7 @@ static void integrity_metadata(struct work_struct *w)
char *mem, *checksums_ptr; char *mem, *checksums_ptr;
again: again:
mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset; mem = bvec_kmap_local(&bv);
pos = 0; pos = 0;
checksums_ptr = checksums; checksums_ptr = checksums;
do { do {
...@@ -1775,17 +1780,22 @@ static void integrity_metadata(struct work_struct *w) ...@@ -1775,17 +1780,22 @@ static void integrity_metadata(struct work_struct *w)
pos += ic->sectors_per_block << SECTOR_SHIFT; pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block; sector += ic->sectors_per_block;
} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
kunmap_atomic(mem); kunmap_local(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) { if (unlikely(r)) {
if (r > 0) { if (r > 0) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b), sector_t s;
(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
bio_devname(bio, b), s);
r = -EILSEQ; r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches); atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
bio, s, 0);
} }
if (likely(checksums != checksums_onstack)) if (likely(checksums != checksums_onstack))
kfree(checksums); kfree(checksums);
...@@ -1953,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, ...@@ -1953,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
n_sectors -= bv.bv_len >> SECTOR_SHIFT; n_sectors -= bv.bv_len >> SECTOR_SHIFT;
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap: retry_kmap:
mem = kmap_atomic(bv.bv_page); mem = bvec_kmap_local(&bv);
if (likely(dio->op == REQ_OP_WRITE)) if (likely(dio->op == REQ_OP_WRITE))
flush_dcache_page(bv.bv_page); flush_dcache_page(bv.bv_page);
...@@ -1967,7 +1977,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, ...@@ -1967,7 +1977,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (unlikely(journal_entry_is_inprogress(je))) { if (unlikely(journal_entry_is_inprogress(je))) {
flush_dcache_page(bv.bv_page); flush_dcache_page(bv.bv_page);
kunmap_atomic(mem); kunmap_local(mem);
__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
goto retry_kmap; goto retry_kmap;
...@@ -1991,6 +2001,8 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, ...@@ -1991,6 +2001,8 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
logical_sector); logical_sector);
dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
bio, logical_sector, 0);
} }
} }
#endif #endif
...@@ -2058,7 +2070,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, ...@@ -2058,7 +2070,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
if (unlikely(dio->op == REQ_OP_READ)) if (unlikely(dio->op == REQ_OP_READ))
flush_dcache_page(bv.bv_page); flush_dcache_page(bv.bv_page);
kunmap_atomic(mem); kunmap_local(mem);
} while (n_sectors); } while (n_sectors);
if (likely(dio->op == REQ_OP_WRITE)) { if (likely(dio->op == REQ_OP_WRITE)) {
...@@ -2534,8 +2546,10 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, ...@@ -2534,8 +2546,10 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag); (char *)access_journal_data(ic, i, l), test_tag);
if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
}
} }
journal_entry_set_unused(je2); journal_entry_set_unused(je2);
...@@ -4514,9 +4528,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -4514,9 +4528,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (ic->discard) if (ic->discard)
ti->num_discard_bios = 1; ti->num_discard_bios = 1;
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0; return 0;
bad: bad:
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
dm_integrity_dtr(ti); dm_integrity_dtr(ti);
return r; return r;
} }
...@@ -4590,6 +4606,7 @@ static void dm_integrity_dtr(struct dm_target *ti) ...@@ -4590,6 +4606,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
free_alg(&ic->journal_mac_alg); free_alg(&ic->journal_mac_alg);
kfree(ic); kfree(ic);
dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
} }
static struct target_type integrity_target = { static struct target_type integrity_target = {
......
...@@ -753,7 +753,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) ...@@ -753,7 +753,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
*/ */
bio_for_each_segment(bv, bio, iter) { bio_for_each_segment(bv, bio, iter) {
struct page *page; struct page *page;
void *src, *dst; void *dst;
page = alloc_page(GFP_NOIO); page = alloc_page(GFP_NOIO);
if (!page) { if (!page) {
...@@ -765,11 +765,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) ...@@ -765,11 +765,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL; return DM_MAPIO_KILL;
} }
src = kmap_atomic(bv.bv_page);
dst = kmap_atomic(page); dst = kmap_atomic(page);
memcpy(dst, src + bv.bv_offset, bv.bv_len); memcpy_from_bvec(dst, &bv);
kunmap_atomic(dst); kunmap_atomic(dst);
kunmap_atomic(src);
block->vecs[i].bv_page = page; block->vecs[i].bv_page = page;
block->vecs[i].bv_len = bv.bv_len; block->vecs[i].bv_len = bv.bv_len;
block->vec_cnt++; block->vec_cnt++;
......
...@@ -706,7 +706,7 @@ int dm_table_add_target(struct dm_table *t, const char *type, ...@@ -706,7 +706,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
r = dm_split_args(&argc, &argv, params); r = dm_split_args(&argc, &argv, params);
if (r) { if (r) {
tgt->error = "couldn't split parameters (insufficient memory)"; tgt->error = "couldn't split parameters";
goto bad; goto bad;
} }
...@@ -724,7 +724,7 @@ int dm_table_add_target(struct dm_table *t, const char *type, ...@@ -724,7 +724,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return 0; return 0;
bad: bad:
DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r));
dm_put_target_type(tgt->type); dm_put_target_type(tgt->type);
return r; return r;
} }
......
...@@ -428,14 +428,14 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, ...@@ -428,14 +428,14 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
unsigned len; unsigned len;
struct bio_vec bv = bio_iter_iovec(bio, *iter); struct bio_vec bv = bio_iter_iovec(bio, *iter);
page = kmap_atomic(bv.bv_page); page = bvec_kmap_local(&bv);
len = bv.bv_len; len = bv.bv_len;
if (likely(len >= todo)) if (likely(len >= todo))
len = todo; len = todo;
r = process(v, io, page + bv.bv_offset, len); r = process(v, io, page, len);
kunmap_atomic(page); kunmap_local(page);
if (r < 0) if (r < 0)
return r; return r;
......
...@@ -2264,14 +2264,13 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -2264,14 +2264,13 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
raw_spin_lock_init(&wc->endio_list_lock); raw_spin_lock_init(&wc->endio_list_lock);
INIT_LIST_HEAD(&wc->endio_list); INIT_LIST_HEAD(&wc->endio_list);
wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio"); wc->endio_thread = kthread_run(writecache_endio_thread, wc, "writecache_endio");
if (IS_ERR(wc->endio_thread)) { if (IS_ERR(wc->endio_thread)) {
r = PTR_ERR(wc->endio_thread); r = PTR_ERR(wc->endio_thread);
wc->endio_thread = NULL; wc->endio_thread = NULL;
ti->error = "Couldn't spawn endio thread"; ti->error = "Couldn't spawn endio thread";
goto bad; goto bad;
} }
wake_up_process(wc->endio_thread);
/* /*
* Parse the mode (pmem or ssd) * Parse the mode (pmem or ssd)
...@@ -2493,14 +2492,13 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -2493,14 +2492,13 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
bio_list_init(&wc->flush_list); bio_list_init(&wc->flush_list);
wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); wc->flush_thread = kthread_run(writecache_flush_thread, wc, "dm_writecache_flush");
if (IS_ERR(wc->flush_thread)) { if (IS_ERR(wc->flush_thread)) {
r = PTR_ERR(wc->flush_thread); r = PTR_ERR(wc->flush_thread);
wc->flush_thread = NULL; wc->flush_thread = NULL;
ti->error = "Couldn't spawn flush thread"; ti->error = "Couldn't spawn flush thread";
goto bad; goto bad;
} }
wake_up_process(wc->flush_thread);
r = calculate_memory_size(wc->memory_map_size, wc->block_size, r = calculate_memory_size(wc->memory_map_size, wc->block_size,
&n_blocks, &n_metadata_blocks); &n_blocks, &n_metadata_blocks);
......
...@@ -967,7 +967,6 @@ static void dmz_dtr(struct dm_target *ti) ...@@ -967,7 +967,6 @@ static void dmz_dtr(struct dm_target *ti)
struct dmz_target *dmz = ti->private; struct dmz_target *dmz = ti->private;
int i; int i;
flush_workqueue(dmz->chunk_wq);
destroy_workqueue(dmz->chunk_wq); destroy_workqueue(dmz->chunk_wq);
for (i = 0; i < dmz->nr_ddevs; i++) for (i = 0; i < dmz->nr_ddevs; i++)
......
...@@ -1792,7 +1792,7 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -1792,7 +1792,7 @@ static struct mapped_device *alloc_dev(int minor)
format_dev_t(md->name, MKDEV(_major, minor)); format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
if (!md->wq) if (!md->wq)
goto bad; goto bad;
......
...@@ -120,6 +120,8 @@ ...@@ -120,6 +120,8 @@
#define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */ #define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */
#define AUDIT_URINGOP 1336 /* io_uring operation */ #define AUDIT_URINGOP 1336 /* io_uring operation */
#define AUDIT_OPENAT2 1337 /* Record showing openat2 how args */ #define AUDIT_OPENAT2 1337 /* Record showing openat2 how args */
#define AUDIT_DM_CTRL 1338 /* Device Mapper target control */
#define AUDIT_DM_EVENT 1339 /* Device Mapper events */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment