Commit 90d624af authored by Linus Torvalds

Merge tag 'for-6.7/block-2023-10-30' of git://git.kernel.dk/linux

Pull block updates from Jens Axboe:

 - Improvements to the queue_rqs() support, and adding null_blk support
   for that as well (Chengming)

 - Series improving badblocks support (Coly)

 - Key store support for sed-opal (Greg)

 - IBM partition string handling improvements (Jan)

 - Make number of ublk devices supported configurable (Mike)

 - Cancelation improvements for ublk (Ming)

 - MD pull requests via Song:
     - Handle timeout in md-cluster, by Denis Plotnikov
     - Cleanup pers->prepare_suspend, by Yu Kuai
     - Rewrite mddev_suspend(), by Yu Kuai
     - Simplify md_seq_ops, by Yu Kuai
      - Reduce unnecessary locking in array_state_store(), by Mariusz
       Tkaczyk
     - Make rdev add/remove independent from daemon thread, by Yu Kuai
     - Refactor code around quiesce() and mddev_suspend(), by Yu Kuai

 - NVMe pull request via Keith:
     - nvme-auth updates (Mark)
     - nvme-tcp tls (Hannes)
      - nvme-fc annotations (Kees)

 - Misc cleanups and improvements (Jiapeng, Joel)

* tag 'for-6.7/block-2023-10-30' of git://git.kernel.dk/linux: (95 commits)
  block: ublk_drv: Remove unused function
  md: cleanup pers->prepare_suspend()
  nvme-auth: allow mixing of secret and hash lengths
  nvme-auth: use transformed key size to create resp
  nvme-auth: alloc nvme_dhchap_key as single buffer
  nvmet-tcp: use 'spin_lock_bh' for state_lock()
  powerpc/pseries: PLPKS SED Opal keystore support
  block: sed-opal: keystore access for SED Opal keys
  block:sed-opal: SED Opal keystore
  ublk: simplify aborting request
  ublk: replace monitor with cancelable uring_cmd
  ublk: quiesce request queue when aborting queue
  ublk: rename mm_lock as lock
  ublk: move ublk_cancel_dev() out of ub->mutex
  ublk: make sure io cmd handled in submitter task context
  ublk: don't get ublk device reference in ublk_abort_queue()
  ublk: Make ublks_max configurable
  ublk: Limit dev_id/ub_number values
  md-cluster: check for timeout while a new disk adding
  nvme: rework NVME_AUTH Kconfig selection
  ...
parents 4de520f1 0c696bb3
......@@ -164,6 +164,12 @@ config PSERIES_PLPKS
# This option is selected by in-kernel consumers that require
# access to the PKS.
config PSERIES_PLPKS_SED
depends on PPC_PSERIES
bool
# This option is selected by in-kernel consumers that require
# access to the SED PKS keystore.
config PAPR_SCM
depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
......
......@@ -29,6 +29,7 @@ obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
obj-$(CONFIG_PSERIES_PLPKS) += plpks.o
obj-$(CONFIG_PPC_SECURE_BOOT) += plpks-secvar.o
obj-$(CONFIG_PSERIES_PLPKS_SED) += plpks_sed_ops.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Platform specific code for non-volatile SED key access
* Copyright (C) 2022 IBM Corporation
*
* Define operations for SED Opal to read/write keys
* from POWER LPAR Platform KeyStore(PLPKS).
*
* Self Encrypting Drives(SED) key storage using PLPKS
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ioctl.h>
#include <linux/sed-opal-key.h>
#include <asm/plpks.h>
static bool plpks_sed_initialized = false;
static bool plpks_sed_available = false;
/*
* structure that contains all SED data
*/
struct plpks_sed_object_data {
u_char version;
u_char pad1[7];
u_long authority;
u_long range;
u_int key_len;
u_char key[32];
};
#define PLPKS_SED_OBJECT_DATA_V0 0
#define PLPKS_SED_MANGLED_LABEL "/default/pri"
#define PLPKS_SED_COMPONENT "sed-opal"
#define PLPKS_SED_KEY "opal-boot-pin"
/*
* authority is admin1 and range is global
*/
#define PLPKS_SED_AUTHORITY 0x0000000900010001
#define PLPKS_SED_RANGE 0x0000080200000001
static void plpks_init_var(struct plpks_var *var, char *keyname)
{
if (!plpks_sed_initialized) {
plpks_sed_initialized = true;
plpks_sed_available = plpks_is_available();
if (!plpks_sed_available)
pr_err("SED: plpks not available\n");
}
var->name = keyname;
var->namelen = strlen(keyname);
if (strcmp(PLPKS_SED_KEY, keyname) == 0) {
var->name = PLPKS_SED_MANGLED_LABEL;
var->namelen = strlen(keyname);
}
var->policy = PLPKS_WORLDREADABLE;
var->os = PLPKS_VAR_COMMON;
var->data = NULL;
var->datalen = 0;
var->component = PLPKS_SED_COMPONENT;
}
/*
* Read the SED Opal key from PLPKS given the label
*/
int sed_read_key(char *keyname, char *key, u_int *keylen)
{
struct plpks_var var;
struct plpks_sed_object_data data;
int ret;
u_int len;
plpks_init_var(&var, keyname);
if (!plpks_sed_available)
return -EOPNOTSUPP;
var.data = (u8 *)&data;
var.datalen = sizeof(data);
ret = plpks_read_os_var(&var);
if (ret != 0)
return ret;
len = min_t(u16, be32_to_cpu(data.key_len), var.datalen);
memcpy(key, data.key, len);
key[len] = '\0';
*keylen = len;
return 0;
}
/*
* Write the SED Opal key to PLPKS given the label
*/
int sed_write_key(char *keyname, char *key, u_int keylen)
{
struct plpks_var var;
struct plpks_sed_object_data data;
struct plpks_var_name vname;
plpks_init_var(&var, keyname);
if (!plpks_sed_available)
return -EOPNOTSUPP;
var.datalen = sizeof(struct plpks_sed_object_data);
var.data = (u8 *)&data;
/* initialize SED object */
data.version = PLPKS_SED_OBJECT_DATA_V0;
data.authority = cpu_to_be64(PLPKS_SED_AUTHORITY);
data.range = cpu_to_be64(PLPKS_SED_RANGE);
memset(&data.pad1, '\0', sizeof(data.pad1));
data.key_len = cpu_to_be32(keylen);
memcpy(data.key, (char *)key, keylen);
/*
* Key update requires remove first. The return value
* is ignored since it's okay if the key doesn't exist.
*/
vname.namelen = var.namelen;
vname.name = var.name;
plpks_remove_var(var.component, var.os, vname);
return plpks_write_var(var);
}
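
The sed_read_key() and sed_write_key() entry points above are declared in the new <linux/sed-opal-key.h> header, whose diff is not shown in this view. A minimal sketch of what that header plausibly provides — extern declarations when CONFIG_PSERIES_PLPKS_SED is enabled, and inline stubs returning -EOPNOTSUPP otherwise so block/sed-opal.c can fall back to the keyring (the exact contents are an assumption):

/* Hypothetical sketch of include/linux/sed-opal-key.h (not shown above) */
#ifdef CONFIG_PSERIES_PLPKS_SED
int sed_read_key(char *keyname, char *key, u_int *keylen);
int sed_write_key(char *keyname, char *key, u_int keylen);
#else
static inline int sed_read_key(char *keyname, char *key, u_int *keylen)
{
	return -EOPNOTSUPP;
}
static inline int sed_write_key(char *keyname, char *key, u_int keylen)
{
	return -EOPNOTSUPP;
}
#endif

The block/sed-opal.c hunks further down rely on exactly that -EOPNOTSUPP convention: a failed sed_write_key() only warns when the error is something other than "no keystore available".
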
......@@ -186,6 +186,7 @@ config BLK_SED_OPAL
bool "Logic for interfacing with Opal enabled SEDs"
depends on KEYS
select PSERIES_PLPKS if PPC_PSERIES
select PSERIES_PLPKS_SED if PPC_PSERIES
help
Builds Logic for interfacing with Opal enabled controllers.
Enabling this option enables users to setup/unlock/lock
......
......@@ -323,16 +323,9 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->mq_ctx = first_rq->mq_ctx;
flush_rq->mq_hctx = first_rq->mq_hctx;
if (!q->elevator) {
if (!q->elevator)
flush_rq->tag = first_rq->tag;
/*
* We borrow data request's driver tag, so have to mark
* this flush request as INFLIGHT for avoiding double
* account of this driver tag
*/
flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
} else
else
flush_rq->internal_tag = first_rq->internal_tag;
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
......
......@@ -246,7 +246,6 @@ static const char *const rqf_name[] = {
RQF_NAME(STARTED),
RQF_NAME(FLUSH_SEQ),
RQF_NAME(MIXED_MERGE),
RQF_NAME(MQ_INFLIGHT),
RQF_NAME(DONTPREP),
RQF_NAME(SCHED_TAGS),
RQF_NAME(USE_SCHED),
......
......@@ -426,6 +426,8 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
rq_list_add(data->cached_rq, rq);
nr++;
}
if (!(data->rq_flags & RQF_SCHED_TAGS))
blk_mq_add_active_requests(data->hctx, nr);
/* caller already holds a reference, add for remainder */
percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
data->nr_tags -= nr;
......@@ -510,6 +512,8 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
goto retry;
}
if (!(data->rq_flags & RQF_SCHED_TAGS))
blk_mq_inc_active_requests(data->hctx);
rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
blk_mq_rq_time_init(rq, alloc_time_ns);
return rq;
......@@ -669,6 +673,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
tag = blk_mq_get_tag(&data);
if (tag == BLK_MQ_NO_TAG)
goto out_queue_exit;
if (!(data.rq_flags & RQF_SCHED_TAGS))
blk_mq_inc_active_requests(data.hctx);
rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
blk_mq_rq_time_init(rq, alloc_time_ns);
rq->__data_len = 0;
......@@ -708,11 +714,10 @@ static void __blk_mq_free_request(struct request *rq)
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;
if (rq->rq_flags & RQF_MQ_INFLIGHT)
__blk_mq_dec_active_requests(hctx);
if (rq->tag != BLK_MQ_NO_TAG)
if (rq->tag != BLK_MQ_NO_TAG) {
blk_mq_dec_active_requests(hctx);
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
}
if (sched_tag != BLK_MQ_NO_TAG)
blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
blk_mq_sched_restart(hctx);
......@@ -1061,12 +1066,7 @@ static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
{
struct request_queue *q = hctx->queue;
/*
* All requests should have been marked as RQF_MQ_INFLIGHT, so
* update hctx->nr_active in batch
*/
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_sub_active_requests(hctx, nr_tags);
blk_mq_sub_active_requests(hctx, nr_tags);
blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
percpu_ref_put_many(&q->q_usage_counter, nr_tags);
......@@ -1259,6 +1259,7 @@ void blk_mq_start_request(struct request *rq)
blk_add_timer(rq);
WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
rq->mq_hctx->tags->rqs[rq->tag] = rq;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
......@@ -1748,7 +1749,7 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
return data.rq;
}
static bool __blk_mq_alloc_driver_tag(struct request *rq)
bool __blk_mq_alloc_driver_tag(struct request *rq)
{
struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
......@@ -1769,20 +1770,7 @@ static bool __blk_mq_alloc_driver_tag(struct request *rq)
return false;
rq->tag = tag + tag_offset;
return true;
}
bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
return false;
if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
rq->rq_flags |= RQF_MQ_INFLIGHT;
__blk_mq_inc_active_requests(hctx);
}
hctx->tags->rqs[rq->tag] = rq;
blk_mq_inc_active_requests(rq->mq_hctx);
return true;
}
......@@ -2794,13 +2782,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
* If we do, we can dispatch the whole plug list in one go. We
* already know at this point that all requests belong to the
* same queue, caller must ensure that's the case.
*
* Since we pass off the full list to the driver at this point,
* we do not increment the active request count for the queue.
* Bypass shared tags for now because of that.
*/
if (q->mq_ops->queue_rqs &&
!(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
if (q->mq_ops->queue_rqs) {
blk_mq_run_dispatch_ops(q,
__blk_mq_flush_plug_list(q, plug));
if (rq_list_empty(plug->mq_list))
......
......@@ -271,12 +271,18 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq)
return -1;
}
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
int val)
{
if (blk_mq_is_shared_tags(hctx->flags))
atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
else
atomic_inc(&hctx->nr_active);
atomic_add(val, &hctx->nr_active);
}
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
__blk_mq_add_active_requests(hctx, 1);
}
static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
......@@ -293,6 +299,32 @@ static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
__blk_mq_sub_active_requests(hctx, 1);
}
static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
int val)
{
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_add_active_requests(hctx, val);
}
static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_inc_active_requests(hctx);
}
static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
int val)
{
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_sub_active_requests(hctx, val);
}
static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
__blk_mq_dec_active_requests(hctx);
}
static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
if (blk_mq_is_shared_tags(hctx->flags))
......@@ -302,13 +334,9 @@ static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
blk_mq_dec_active_requests(hctx);
blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
rq->tag = BLK_MQ_NO_TAG;
if (rq->rq_flags & RQF_MQ_INFLIGHT) {
rq->rq_flags &= ~RQF_MQ_INFLIGHT;
__blk_mq_dec_active_requests(hctx);
}
}
static inline void blk_mq_put_driver_tag(struct request *rq)
......@@ -319,19 +347,14 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
bool __blk_mq_alloc_driver_tag(struct request *rq);
static inline bool blk_mq_get_driver_tag(struct request *rq)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if (rq->tag != BLK_MQ_NO_TAG &&
!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
hctx->tags->rqs[rq->tag] = rq;
return true;
}
if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
return false;
return __blk_mq_get_driver_tag(hctx, rq);
return true;
}
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
......
......@@ -61,6 +61,47 @@ static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo)
ptr->b;
}
/* Volume Label Type/ID Length */
#define DASD_VOL_TYPE_LEN 4
#define DASD_VOL_ID_LEN 6
/* Volume Label Types */
#define DASD_VOLLBL_TYPE_VOL1 0
#define DASD_VOLLBL_TYPE_LNX1 1
#define DASD_VOLLBL_TYPE_CMS1 2
struct dasd_vollabel {
char *type;
int idx;
};
static struct dasd_vollabel dasd_vollabels[] = {
[DASD_VOLLBL_TYPE_VOL1] = {
.type = "VOL1",
.idx = DASD_VOLLBL_TYPE_VOL1,
},
[DASD_VOLLBL_TYPE_LNX1] = {
.type = "LNX1",
.idx = DASD_VOLLBL_TYPE_LNX1,
},
[DASD_VOLLBL_TYPE_CMS1] = {
.type = "CMS1",
.idx = DASD_VOLLBL_TYPE_CMS1,
},
};
static int get_label_by_type(const char *type)
{
int i;
for (i = 0; i < ARRAY_SIZE(dasd_vollabels); i++) {
if (!memcmp(type, dasd_vollabels[i].type, DASD_VOL_TYPE_LEN))
return dasd_vollabels[i].idx;
}
return -1;
}
static int find_label(struct parsed_partitions *state,
dasd_information2_t *info,
struct hd_geometry *geo,
......@@ -70,12 +111,10 @@ static int find_label(struct parsed_partitions *state,
char type[],
union label_t *label)
{
Sector sect;
unsigned char *data;
sector_t testsect[3];
unsigned char temp[5];
int found = 0;
int i, testcount;
Sector sect;
void *data;
/* There are three places where we may find a valid label:
* - on an ECKD disk it's block 2
......@@ -103,31 +142,27 @@ static int find_label(struct parsed_partitions *state,
if (data == NULL)
continue;
memcpy(label, data, sizeof(*label));
memcpy(temp, data, 4);
temp[4] = 0;
EBCASC(temp, 4);
memcpy(type, data, DASD_VOL_TYPE_LEN);
EBCASC(type, DASD_VOL_TYPE_LEN);
put_dev_sector(sect);
if (!strcmp(temp, "VOL1") ||
!strcmp(temp, "LNX1") ||
!strcmp(temp, "CMS1")) {
if (!strcmp(temp, "VOL1")) {
strncpy(type, label->vol.vollbl, 4);
strncpy(name, label->vol.volid, 6);
} else {
strncpy(type, label->lnx.vollbl, 4);
strncpy(name, label->lnx.volid, 6);
}
EBCASC(type, 4);
EBCASC(name, 6);
switch (get_label_by_type(type)) {
case DASD_VOLLBL_TYPE_VOL1:
memcpy(name, label->vol.volid, DASD_VOL_ID_LEN);
EBCASC(name, DASD_VOL_ID_LEN);
*labelsect = testsect[i];
return 1;
case DASD_VOLLBL_TYPE_LNX1:
case DASD_VOLLBL_TYPE_CMS1:
memcpy(name, label->lnx.volid, DASD_VOL_ID_LEN);
EBCASC(name, DASD_VOL_ID_LEN);
*labelsect = testsect[i];
found = 1;
return 1;
default:
break;
}
}
if (!found)
memset(label, 0, sizeof(*label));
return found;
return 0;
}
static int find_vol1_partitions(struct parsed_partitions *state,
......@@ -297,8 +332,8 @@ int ibm_partition(struct parsed_partitions *state)
sector_t nr_sectors;
dasd_information2_t *info;
struct hd_geometry *geo;
char type[5] = {0,};
char name[7] = {0,};
char type[DASD_VOL_TYPE_LEN + 1] = "";
char name[DASD_VOL_ID_LEN + 1] = "";
sector_t labelsect;
union label_t *label;
......@@ -330,18 +365,21 @@ int ibm_partition(struct parsed_partitions *state)
info = NULL;
}
if (find_label(state, info, geo, blocksize, &labelsect, name, type,
label)) {
if (!strncmp(type, "VOL1", 4)) {
if (find_label(state, info, geo, blocksize, &labelsect, name, type, label)) {
switch (get_label_by_type(type)) {
case DASD_VOLLBL_TYPE_VOL1:
res = find_vol1_partitions(state, geo, blocksize, name,
label);
} else if (!strncmp(type, "LNX1", 4)) {
break;
case DASD_VOLLBL_TYPE_LNX1:
res = find_lnx1_partitions(state, geo, blocksize, name,
label, labelsect, nr_sectors,
info);
} else if (!strncmp(type, "CMS1", 4)) {
break;
case DASD_VOLLBL_TYPE_CMS1:
res = find_cms1_partitions(state, geo, blocksize, name,
label, labelsect);
break;
}
} else if (info) {
/*
......
......@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <uapi/linux/sed-opal.h>
#include <linux/sed-opal.h>
#include <linux/sed-opal-key.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/key.h>
......@@ -3018,7 +3019,13 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
if (ret)
return ret;
/* update keyring with new password */
/* update keyring and key store with new password */
ret = sed_write_key(OPAL_AUTH_KEY,
opal_pw->new_user_pw.opal_key.key,
opal_pw->new_user_pw.opal_key.key_len);
if (ret != -EOPNOTSUPP)
pr_warn("error updating SED key: %d\n", ret);
ret = update_sed_opal_key(OPAL_AUTH_KEY,
opal_pw->new_user_pw.opal_key.key,
opal_pw->new_user_pw.opal_key.key_len);
......@@ -3291,6 +3298,8 @@ EXPORT_SYMBOL_GPL(sed_ioctl);
static int __init sed_opal_init(void)
{
struct key *kr;
char init_sed_key[OPAL_KEY_MAX];
int keylen = OPAL_KEY_MAX - 1;
kr = keyring_alloc(".sed_opal",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
......@@ -3303,6 +3312,11 @@ static int __init sed_opal_init(void)
sed_opal_keyring = kr;
return 0;
if (sed_read_key(OPAL_AUTH_KEY, init_sed_key, &keylen) < 0) {
memset(init_sed_key, '\0', sizeof(init_sed_key));
keylen = OPAL_KEY_MAX - 1;
}
return update_sed_opal_key(OPAL_AUTH_KEY, init_sed_key, keylen);
}
late_initcall(sed_opal_init);
......@@ -39,8 +39,7 @@ static struct ktstate kts;
#ifndef MODULE
static int __init aoe_iflist_setup(char *str)
{
strncpy(aoe_iflist, str, IFLISTSZ);
aoe_iflist[IFLISTSZ - 1] = '\0';
strscpy(aoe_iflist, str, IFLISTSZ);
return 1;
}
......
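
The aoe change above (and the null_blk change further down) replaces strncpy() with strscpy()/strscpy_pad(), which always NUL-terminate the destination and report truncation instead of silently dropping the terminator. A minimal sketch of the semantics, using a hypothetical buffer and caller not taken from this patch:

#include <linux/kernel.h>
#include <linux/string.h>

static void copy_name(const char *src)
{
	char buf[8];
	ssize_t n;

	/* copies at most 7 bytes plus NUL; returns -E2BIG on truncation */
	n = strscpy(buf, src, sizeof(buf));
	if (n == -E2BIG)
		pr_warn("name \"%s\" truncated to \"%s\"\n", src, buf);

	/* strscpy_pad() additionally zero-fills the remainder of buf */
	strscpy_pad(buf, src, sizeof(buf));
}
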
......@@ -1750,6 +1750,25 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}
static void null_queue_rqs(struct request **rqlist)
{
struct request *requeue_list = NULL;
struct request **requeue_lastp = &requeue_list;
struct blk_mq_queue_data bd = { };
blk_status_t ret;
do {
struct request *rq = rq_list_pop(rqlist);
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
rq_list_add_tail(&requeue_lastp, rq);
} while (!rq_list_empty(*rqlist));
*rqlist = requeue_list;
}
static void cleanup_queue(struct nullb_queue *nq)
{
bitmap_free(nq->tag_map);
......@@ -1802,6 +1821,7 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.queue_rqs = null_queue_rqs,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
.poll = null_poll,
......@@ -1946,7 +1966,7 @@ static int null_gendisk_register(struct nullb *nullb)
else
disk->fops = &null_bio_ops;
disk->private_data = nullb;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
strscpy_pad(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
if (nullb->dev->zoned) {
int ret = null_register_zoned_dev(nullb);
......
......@@ -470,8 +470,6 @@ static bool virtblk_prep_rq_batch(struct request *req)
struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
req->mq_hctx->tags->rqs[req->tag] = req;
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
......
......@@ -3655,7 +3655,6 @@ static struct ctl_table cdrom_table[] = {
.mode = 0644,
.proc_handler = cdrom_sysctl_handler
},
{ }
};
static struct ctl_table_header *cdrom_sysctl_header;
......
......@@ -749,7 +749,11 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return ERR_PTR(-ENOMEM);
}
mddev_init(&rs->md);
if (mddev_init(&rs->md)) {
kfree(rs);
ti->error = "Cannot initialize raid context";
return ERR_PTR(-ENOMEM);
}
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
......@@ -798,6 +802,7 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
mddev_destroy(&rs->md);
kfree(rs);
}
......@@ -3239,7 +3244,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
mddev_lock_nointr(&rs->md);
mddev_suspend_and_lock_nointr(&rs->md);
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
if (r) {
......@@ -3263,7 +3268,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
mddev_suspend(&rs->md);
set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
......@@ -3793,9 +3797,7 @@ static void raid_postsuspend(struct dm_target *ti)
if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
md_stop_writes(&rs->md);
mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
mddev_unlock(&rs->md);
mddev_suspend(&rs->md, false);
}
}
......@@ -4054,8 +4056,7 @@ static void raid_resume(struct dm_target *ti)
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev->ro = 0;
mddev->in_sync = 0;
mddev_resume(mddev);
mddev_unlock(mddev);
mddev_unlock_and_resume(mddev);
}
}
......
......@@ -175,7 +175,7 @@ static void __init md_setup_drive(struct md_setup_args *args)
return;
}
err = mddev_lock(mddev);
err = mddev_suspend_and_lock(mddev);
if (err) {
pr_err("md: failed to lock array %s\n", name);
goto out_mddev_put;
......@@ -221,7 +221,7 @@ static void __init md_setup_drive(struct md_setup_args *args)
if (err)
pr_warn("md: starting %s failed\n", name);
out_unlock:
mddev_unlock(mddev);
mddev_unlock_and_resume(mddev);
out_mddev_put:
mddev_put(mddev);
}
......
......@@ -1861,7 +1861,7 @@ void md_bitmap_destroy(struct mddev *mddev)
md_bitmap_wait_behind_writes(mddev);
if (!mddev->serialize_policy)
mddev_destroy_serial_pool(mddev, NULL, true);
mddev_destroy_serial_pool(mddev, NULL);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
......@@ -1977,7 +1977,7 @@ int md_bitmap_load(struct mddev *mddev)
goto out;
rdev_for_each(rdev, mddev)
mddev_create_serial_pool(mddev, rdev, true);
mddev_create_serial_pool(mddev, rdev);
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
......@@ -2348,14 +2348,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
{
int rv;
rv = mddev_lock(mddev);
rv = mddev_suspend_and_lock(mddev);
if (rv)
return rv;
if (mddev->pers) {
if (!mddev->pers->quiesce) {
rv = -EBUSY;
goto out;
}
if (mddev->recovery || mddev->sync_thread) {
rv = -EBUSY;
goto out;
......@@ -2369,11 +2366,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EBUSY;
goto out;
}
if (mddev->pers) {
mddev_suspend(mddev);
md_bitmap_destroy(mddev);
mddev_resume(mddev);
}
md_bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
......@@ -2383,6 +2377,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
} else {
/* No bitmap, OK to set a location */
long long offset;
struct bitmap *bitmap;
if (strncmp(buf, "none", 4) == 0)
/* nothing to be done */;
else if (strncmp(buf, "file:", 5) == 0) {
......@@ -2406,25 +2402,20 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
goto out;
}
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev);
if (IS_ERR(bitmap))
rv = PTR_ERR(bitmap);
else {
mddev->bitmap = bitmap;
rv = md_bitmap_load(mddev);
if (rv)
mddev->bitmap_info.offset = 0;
}
if (rv) {
md_bitmap_destroy(mddev);
mddev_resume(mddev);
goto out;
}
mddev_resume(mddev);
bitmap = md_bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
rv = PTR_ERR(bitmap);
goto out;
}
mddev->bitmap = bitmap;
rv = md_bitmap_load(mddev);
if (rv) {
mddev->bitmap_info.offset = 0;
md_bitmap_destroy(mddev);
goto out;
}
}
}
......@@ -2437,7 +2428,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
}
rv = 0;
out:
mddev_unlock(mddev);
mddev_unlock_and_resume(mddev);
if (rv)
return rv;
return len;
......@@ -2546,7 +2537,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
if (backlog > COUNTER_MAX)
return -EINVAL;
rv = mddev_lock(mddev);
rv = mddev_suspend_and_lock(mddev);
if (rv)
return rv;
......@@ -2571,16 +2562,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
if (!backlog && mddev->serial_info_pool) {
/* serial_info_pool is not needed if backlog is zero */
if (!mddev->serialize_policy)
mddev_destroy_serial_pool(mddev, NULL, false);
mddev_destroy_serial_pool(mddev, NULL);
} else if (backlog && !mddev->serial_info_pool) {
/* serial_info_pool is needed since backlog is not zero */
rdev_for_each(rdev, mddev)
mddev_create_serial_pool(mddev, rdev, false);
mddev_create_serial_pool(mddev, rdev);
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
mddev_unlock(mddev);
mddev_unlock_and_resume(mddev);
return len;
}
......
......@@ -501,7 +501,7 @@ static void process_suspend_info(struct mddev *mddev,
mddev->pers->quiesce(mddev, 0);
}
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
static int process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
{
char disk_uuid[64];
struct md_cluster_info *cinfo = mddev->cluster_info;
......@@ -509,6 +509,7 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
char raid_slot[16];
char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
int len;
int res = 0;
len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
sprintf(disk_uuid + len, "%pU", cmsg->uuid);
......@@ -517,9 +518,14 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
init_completion(&cinfo->newdisk_completion);
set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
wait_for_completion_timeout(&cinfo->newdisk_completion,
NEW_DEV_TIMEOUT);
if (!wait_for_completion_timeout(&cinfo->newdisk_completion,
NEW_DEV_TIMEOUT)) {
pr_err("md-cluster(%s:%d): timeout on a new disk adding\n",
__func__, __LINE__);
res = -1;
}
clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
return res;
}
......@@ -594,7 +600,8 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
le64_to_cpu(msg->high));
break;
case NEWDISK:
process_add_new_disk(mddev, msg);
if (process_add_new_disk(mddev, msg))
ret = -1;
break;
case REMOVE:
process_remove_disk(mddev, msg);
......
......@@ -69,6 +69,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
if (!conf)
return NULL;
/*
* conf->raid_disks is copy of mddev->raid_disks. The reason to
* keep a copy of mddev->raid_disks in struct linear_conf is,
* mddev->raid_disks may not be consistent with pointers number of
* conf->disks[] when it is updated in linear_add() and used to
* iterate old conf->disks[] earray in linear_congested().
* Here conf->raid_disks is always consitent with number of
* pointers in conf->disks[] array, and mddev->private is updated
* with rcu_assign_pointer() in linear_addr(), such race can be
* avoided.
*/
conf->raid_disks = raid_disks;
cnt = 0;
conf->array_sectors = 0;
......@@ -112,19 +125,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
conf->disks[i-1].end_sector +
conf->disks[i].rdev->sectors;
/*
* conf->raid_disks is copy of mddev->raid_disks. The reason to
* keep a copy of mddev->raid_disks in struct linear_conf is,
* mddev->raid_disks may not be consistent with pointers number of
* conf->disks[] when it is updated in linear_add() and used to
* iterate old conf->disks[] earray in linear_congested().
* Here conf->raid_disks is always consitent with number of
* pointers in conf->disks[] array, and mddev->private is updated
* with rcu_assign_pointer() in linear_addr(), such race can be
* avoided.
*/
conf->raid_disks = raid_disks;
return conf;
out:
......@@ -183,7 +183,6 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
* in linear_congested(), therefore kfree_rcu() is used to free
* oldconf until no one uses it anymore.
*/
mddev_suspend(mddev);
oldconf = rcu_dereference_protected(mddev->private,
lockdep_is_held(&mddev->reconfig_mutex));
mddev->raid_disks++;
......@@ -192,7 +191,6 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
mddev_resume(mddev);
kfree_rcu(oldconf, rcu);
return 0;
}
......
......@@ -12,6 +12,6 @@ struct linear_conf
struct rcu_head rcu;
sector_t array_sectors;
int raid_disks; /* a copy of mddev->raid_disks */
struct dev_info disks[];
struct dev_info disks[] __counted_by(raid_disks);
};
#endif
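
The __counted_by(raid_disks) annotation ties the flexible array disks[] to its element count, letting FORTIFY_SOURCE and UBSAN bounds checking (on compilers that support the attribute) flag out-of-range accesses; it is presumably also why linear_conf() above now assigns conf->raid_disks right after allocation, before the array is used. A generic sketch of the pattern with a hypothetical struct, not taken from this patch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct flex_example {
	int nr;				/* number of valid entries below */
	u64 items[] __counted_by(nr);	/* bounds-checked against nr */
};

static struct flex_example *flex_alloc(int nr)
{
	struct flex_example *e;

	e = kzalloc(struct_size(e, items, nr), GFP_KERNEL);
	if (!e)
		return NULL;
	/* set the counter before items[] is ever indexed */
	e->nr = nr;
	return e;
}
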
......@@ -246,10 +246,6 @@ struct md_cluster_info;
* become failed.
* @MD_HAS_PPL: The raid array has PPL feature set.
* @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
* @MD_ALLOW_SB_UPDATE: md_check_recovery is allowed to update the metadata
* without taking reconfig_mutex.
* @MD_UPDATING_SB: md_check_recovery is updating the metadata without
* explicitly holding reconfig_mutex.
* @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
* array is ready yet.
* @MD_BROKEN: This is used to stop writes and mark array as failed.
......@@ -266,8 +262,6 @@ enum mddev_flags {
MD_FAILFAST_SUPPORTED,
MD_HAS_PPL,
MD_HAS_MULTIPLE_PPLS,
MD_ALLOW_SB_UPDATE,
MD_UPDATING_SB,
MD_NOT_READY,
MD_BROKEN,
MD_DELETED,
......@@ -314,6 +308,7 @@ struct mddev {
unsigned long sb_flags;
int suspended;
struct mutex suspend_mutex;
struct percpu_ref active_io;
int ro;
int sysfs_active; /* set when sysfs deletes
......@@ -451,7 +446,10 @@ struct mddev {
struct kernfs_node *sysfs_degraded; /*handle for 'degraded' */
struct kernfs_node *sysfs_level; /*handle for 'level' */
struct work_struct del_work; /* used for delayed sysfs removal */
/* used for delayed sysfs removal */
struct work_struct del_work;
/* used for register new sync thread */
struct work_struct sync_work;
/* "lock" protects:
* flush_bio transition from NULL to !NULL
......@@ -565,23 +563,6 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
enum md_ro_state {
MD_RDWR,
MD_RDONLY,
MD_AUTO_READ,
MD_MAX_STATE
};
static inline bool md_is_rdwr(struct mddev *mddev)
{
return (mddev->ro == MD_RDWR);
}
static inline bool is_md_suspended(struct mddev *mddev)
{
return percpu_ref_is_dying(&mddev->active_io);
}
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
......@@ -641,7 +622,6 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
......@@ -766,7 +746,6 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t
extern void md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
......@@ -793,7 +772,8 @@ extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
......@@ -804,15 +784,14 @@ extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
bool is_suspend);
extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
bool is_suspend);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
......@@ -850,6 +829,33 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
static inline int mddev_suspend_and_lock(struct mddev *mddev)
{
int ret;
ret = mddev_suspend(mddev, true);
if (ret)
return ret;
ret = mddev_lock(mddev);
if (ret)
mddev_resume(mddev);
return ret;
}
static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev)
{
mddev_suspend(mddev, false);
mutex_lock(&mddev->reconfig_mutex);
}
static inline void mddev_unlock_and_resume(struct mddev *mddev)
{
mddev_unlock(mddev);
mddev_resume(mddev);
}
struct mdu_array_info_s;
struct mdu_disk_info_s;
......
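
The mddev_suspend_and_lock()/mddev_unlock_and_resume() helpers above are what the dm-raid, md-autodetect and md-bitmap hunks earlier convert to: suspension is taken before the reconfig mutex and dropped after it, one call per side. An illustrative before/after of a reconfiguration path (a sketch only, not a real call site):

/* before: lock and suspend handled separately at each call site */
static int reconfigure_old(struct mddev *mddev)
{
	int err = mddev_lock(mddev);

	if (err)
		return err;
	mddev_suspend(mddev);		/* old single-argument form */
	/* ... change array configuration ... */
	mddev_resume(mddev);
	mddev_unlock(mddev);
	return 0;
}

/* after: one helper per side, suspend ordered ahead of the lock */
static int reconfigure_new(struct mddev *mddev)
{
	int err = mddev_suspend_and_lock(mddev);

	if (err)
		return err;
	/* ... change array configuration ... */
	mddev_unlock_and_resume(mddev);
	return 0;
}
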
......@@ -1345,6 +1345,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int first_clone;
int max_sectors;
bool write_behind = false;
bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
if (mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
......@@ -1405,7 +1406,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* write-mostly, which means we could allocate write behind
* bio later.
*/
if (rdev && test_bit(WriteMostly, &rdev->flags))
if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
write_behind = true;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
......@@ -3122,8 +3123,7 @@ static int raid1_run(struct mddev *mddev)
mdname(mddev));
return -EIO;
}
if (mddev_init_writes_pending(mddev) < 0)
return -ENOMEM;
/*
* copy the already verified devices into our private RAID1
* bookkeeping area. [whatever we allocate in run(),
......
......@@ -4154,9 +4154,6 @@ static int raid10_run(struct mddev *mddev)
sector_t min_offset_diff = 0;
int first = 1;
if (mddev_init_writes_pending(mddev) < 0)
return -ENOMEM;
if (mddev->private == NULL) {
conf = setup_conf(mddev);
if (IS_ERR(conf))
......
......@@ -2,3 +2,16 @@
config NVME_COMMON
tristate
config NVME_KEYRING
bool
select KEYS
config NVME_AUTH
bool
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
......@@ -4,4 +4,5 @@ ccflags-y += -I$(src)
obj-$(CONFIG_NVME_COMMON) += nvme-common.o
nvme-common-y += auth.o
nvme-common-$(CONFIG_NVME_AUTH) += auth.o
nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o
......@@ -150,6 +150,14 @@ size_t nvme_auth_hmac_hash_len(u8 hmac_id)
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
u32 nvme_auth_key_struct_size(u32 key_len)
{
struct nvme_dhchap_key key;
return struct_size(&key, key, key_len);
}
EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash)
{
......@@ -163,14 +171,9 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
p = strrchr(secret, ':');
if (p)
allocated_len = p - secret;
key = kzalloc(sizeof(*key), GFP_KERNEL);
key = nvme_auth_alloc_key(allocated_len, 0);
if (!key)
return ERR_PTR(-ENOMEM);
key->key = kzalloc(allocated_len, GFP_KERNEL);
if (!key->key) {
ret = -ENOMEM;
goto out_free_key;
}
key_len = base64_decode(secret, allocated_len, key->key);
if (key_len < 0) {
......@@ -187,14 +190,6 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
goto out_free_secret;
}
if (key_hash > 0 &&
(key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
pr_err("Mismatched key len %d for %s\n", key_len,
nvme_auth_hmac_name(key_hash));
ret = -EINVAL;
goto out_free_secret;
}
/* The last four bytes is the CRC in little-endian format */
key_len -= 4;
/*
......@@ -213,37 +208,51 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
key->hash = key_hash;
return key;
out_free_secret:
kfree_sensitive(key->key);
out_free_key:
kfree(key);
nvme_auth_free_key(key);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash)
{
u32 num_bytes = nvme_auth_key_struct_size(len);
struct nvme_dhchap_key *key = kzalloc(num_bytes, GFP_KERNEL);
if (key) {
key->len = len;
key->hash = hash;
}
return key;
}
EXPORT_SYMBOL_GPL(nvme_auth_alloc_key);
void nvme_auth_free_key(struct nvme_dhchap_key *key)
{
if (!key)
return;
kfree_sensitive(key->key);
kfree(key);
kfree_sensitive(key);
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);
u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
struct nvme_dhchap_key *nvme_auth_transform_key(
struct nvme_dhchap_key *key, char *nqn)
{
const char *hmac_name;
struct crypto_shash *key_tfm;
struct shash_desc *shash;
u8 *transformed_key;
int ret;
struct nvme_dhchap_key *transformed_key;
int ret, key_len;
if (!key || !key->key) {
if (!key) {
pr_warn("No key specified\n");
return ERR_PTR(-ENOKEY);
}
if (key->hash == 0) {
transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
key_len = nvme_auth_key_struct_size(key->len);
transformed_key = kmemdup(key, key_len, GFP_KERNEL);
if (!transformed_key)
return ERR_PTR(-ENOMEM);
return transformed_key;
}
hmac_name = nvme_auth_hmac_name(key->hash);
if (!hmac_name) {
......@@ -253,7 +262,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(key_tfm))
return (u8 *)key_tfm;
return ERR_CAST(key_tfm);
shash = kmalloc(sizeof(struct shash_desc) +
crypto_shash_descsize(key_tfm),
......@@ -263,7 +272,8 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
goto out_free_key;
}
transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
key_len = crypto_shash_digestsize(key_tfm);
transformed_key = nvme_auth_alloc_key(key_len, key->hash);
if (!transformed_key) {
ret = -ENOMEM;
goto out_free_shash;
......@@ -282,7 +292,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
if (ret < 0)
goto out_free_transformed_key;
ret = crypto_shash_final(shash, transformed_key);
ret = crypto_shash_final(shash, transformed_key->key);
if (ret < 0)
goto out_free_transformed_key;
......@@ -292,7 +302,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
return transformed_key;
out_free_transformed_key:
kfree_sensitive(transformed_key);
nvme_auth_free_key(transformed_key);
out_free_shash:
kfree(shash);
out_free_key:
......
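
Several hunks above ("nvme-auth: alloc nvme_dhchap_key as single buffer") size the key with nvme_auth_key_struct_size() and free it with a single kfree_sensitive(key), which implies struct nvme_dhchap_key now carries its secret in a flexible array rather than behind a separate pointer. A sketch of the assumed shape (the real definition lives in include/linux/nvme-auth.h, not shown here; field order and types are assumptions):

struct nvme_dhchap_key {
	size_t len;	/* number of secret bytes in key[] */
	u8 hash;	/* DH-HMAC-CHAP hash identifier */
	u8 key[];	/* secret stored inline, sized via struct_size() */
};

With header and secret in one buffer, kfree_sensitive() wipes both in a single call, and nvme_auth_transform_key() can hand back a fully formed key object instead of a bare byte array.
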
......@@ -92,16 +92,26 @@ config NVME_TCP
If unsure, say N.
config NVME_AUTH
config NVME_TCP_TLS
bool "NVMe over Fabrics TCP TLS encryption support"
depends on NVME_TCP
select NVME_COMMON
select NVME_KEYRING
select NET_HANDSHAKE
select KEYS
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
The TLS handshake daemon is availble at
https://github.com/oracle/ktls-utils.
If unsure, say N.
config NVME_HOST_AUTH
bool "NVM Express over Fabrics In-Band Authentication"
depends on NVME_CORE
select NVME_COMMON
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
select NVME_AUTH
help
This provides support for NVMe over Fabrics In-Band Authentication.
......
......@@ -924,7 +924,6 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
return false;
req->mq_hctx->tags->rqs[req->tag] = req;
return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}
......