Commit f50fff73 authored by Hannes Reinecke, committed by Jens Axboe

nvme: implement In-Band authentication

Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
This patch adds two new fabric options: 'dhchap_secret' to specify the
pre-shared key (in ASCII representation according to NVMe 2.0 section
8.13.5.8 'Secret representation'), and 'dhchap_ctrl_secret' to specify
the pre-shared controller key for bi-directional authentication of both
the host and the controller.
Re-authentication can be triggered by writing the PSK into the new
controller sysfs attribute 'dhchap_secret' or 'dhchap_ctrl_secret'.
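For illustration only (addresses, NQN, controller instance and the secrets
below are placeholders, not working values), the new option and attribute
can be exercised roughly like this:

  # initial connect, passing the host secret as a fabric option
  echo "transport=tcp,traddr=192.168.0.10,trsvcid=4420,nqn=nqn.2022-01.io.example:subsys1,dhchap_secret=DHHC-1:00:<base64 key>:" > /dev/nvme-fabrics
  # re-key and trigger re-authentication through the sysfs attribute
  echo "DHHC-1:00:<new base64 key>:" > /sys/class/nvme/nvme0/dhchap_secret
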
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[axboe: fold in clang build fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3bf2fde6
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
......
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
# SPDX-License-Identifier: GPL-2.0-only
config NVME_COMMON
tristate
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -I$(src)
obj-$(CONFIG_NVME_COMMON) += nvme-common.o
nvme-common-y += auth.o
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Hannes Reinecke, SUSE Linux
*/
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>
static u32 nvme_dhchap_seqnum;
static DEFINE_MUTEX(nvme_dhchap_mutex);
u32 nvme_auth_get_seqnum(void)
{
u32 seqnum;
mutex_lock(&nvme_dhchap_mutex);
if (!nvme_dhchap_seqnum)
nvme_dhchap_seqnum = prandom_u32();
else {
nvme_dhchap_seqnum++;
if (!nvme_dhchap_seqnum)
nvme_dhchap_seqnum++;
}
seqnum = nvme_dhchap_seqnum;
mutex_unlock(&nvme_dhchap_mutex);
return seqnum;
}
EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
static struct nvme_auth_dhgroup_map {
const char name[16];
const char kpp[16];
} dhgroup_map[] = {
[NVME_AUTH_DHGROUP_NULL] = {
.name = "null", .kpp = "null" },
[NVME_AUTH_DHGROUP_2048] = {
.name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
[NVME_AUTH_DHGROUP_3072] = {
.name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
[NVME_AUTH_DHGROUP_4096] = {
.name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
[NVME_AUTH_DHGROUP_6144] = {
.name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
[NVME_AUTH_DHGROUP_8192] = {
.name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
};
const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
{
if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
return NULL;
return dhgroup_map[dhgroup_id].name;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
{
if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
return NULL;
return dhgroup_map[dhgroup_id].kpp;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
{
int i;
if (!dhgroup_name || !strlen(dhgroup_name))
return NVME_AUTH_DHGROUP_INVALID;
for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
if (!strlen(dhgroup_map[i].name))
continue;
if (!strncmp(dhgroup_map[i].name, dhgroup_name,
strlen(dhgroup_map[i].name)))
return i;
}
return NVME_AUTH_DHGROUP_INVALID;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
static struct nvme_dhchap_hash_map {
int len;
const char hmac[15];
const char digest[8];
} hash_map[] = {
[NVME_AUTH_HASH_SHA256] = {
.len = 32,
.hmac = "hmac(sha256)",
.digest = "sha256",
},
[NVME_AUTH_HASH_SHA384] = {
.len = 48,
.hmac = "hmac(sha384)",
.digest = "sha384",
},
[NVME_AUTH_HASH_SHA512] = {
.len = 64,
.hmac = "hmac(sha512)",
.digest = "sha512",
},
};
const char *nvme_auth_hmac_name(u8 hmac_id)
{
if (hmac_id >= ARRAY_SIZE(hash_map))
return NULL;
return hash_map[hmac_id].hmac;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
const char *nvme_auth_digest_name(u8 hmac_id)
{
if (hmac_id >= ARRAY_SIZE(hash_map))
return NULL;
return hash_map[hmac_id].digest;
}
EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
u8 nvme_auth_hmac_id(const char *hmac_name)
{
int i;
if (!hmac_name || !strlen(hmac_name))
return NVME_AUTH_HASH_INVALID;
for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
if (!strlen(hash_map[i].hmac))
continue;
if (!strncmp(hash_map[i].hmac, hmac_name,
strlen(hash_map[i].hmac)))
return i;
}
return NVME_AUTH_HASH_INVALID;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id)
{
if (hmac_id >= ARRAY_SIZE(hash_map))
return 0;
return hash_map[hmac_id].len;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
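/*
* Note: 'secret' is the base64-encoded part of a DHHC-1 secret (the
* "DHHC-1:XX:" prefix already stripped). It decodes to a 32-, 48- or
* 64-byte key followed by a 4-byte CRC-32 of that key, stored
* little-endian, giving the 36/52/68-byte lengths checked below.
*/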
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash)
{
struct nvme_dhchap_key *key;
unsigned char *p;
u32 crc;
int ret, key_len;
size_t allocated_len = strlen(secret);
/* Secret might be suffixed with a trailing ':' */
p = strrchr(secret, ':');
if (p)
allocated_len = p - secret;
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
key->key = kzalloc(allocated_len, GFP_KERNEL);
if (!key->key) {
ret = -ENOMEM;
goto out_free_key;
}
key_len = base64_decode(secret, allocated_len, key->key);
if (key_len < 0) {
pr_debug("base64 key decoding error %d\n",
key_len);
ret = key_len;
goto out_free_secret;
}
if (key_len != 36 && key_len != 52 &&
key_len != 68) {
pr_err("Invalid key len %d\n", key_len);
ret = -EINVAL;
goto out_free_secret;
}
if (key_hash > 0 &&
(key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
pr_err("Mismatched key len %d for %s\n", key_len,
nvme_auth_hmac_name(key_hash));
ret = -EINVAL;
goto out_free_secret;
}
/* The last four bytes are the CRC in little-endian format */
key_len -= 4;
/*
* The Linux crc32() implementation does not do the pre- and
* post-inversion (conditioning), so do it manually here.
*/
crc = ~crc32(~0, key->key, key_len);
if (get_unaligned_le32(key->key + key_len) != crc) {
pr_err("key crc mismatch (key %08x, crc %08x)\n",
get_unaligned_le32(key->key + key_len), crc);
ret = -EKEYREJECTED;
goto out_free_secret;
}
key->len = key_len;
key->hash = key_hash;
return key;
out_free_secret:
kfree_sensitive(key->key);
out_free_key:
kfree(key);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
void nvme_auth_free_key(struct nvme_dhchap_key *key)
{
if (!key)
return;
kfree_sensitive(key->key);
kfree(key);
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);
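/*
* Note: the transformed key is an HMAC over the supplied NQN concatenated
* with the string "NVMe-over-Fabrics", keyed with the raw secret and using
* the hash recorded in the key; a hash id of 0 means the secret is used
* unmodified.
*/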
u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
{
const char *hmac_name;
struct crypto_shash *key_tfm;
struct shash_desc *shash;
u8 *transformed_key = NULL;
int ret;
if (!key || !key->key) {
pr_warn("No key specified\n");
return ERR_PTR(-ENOKEY);
}
if (key->hash == 0) {
transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
}
hmac_name = nvme_auth_hmac_name(key->hash);
if (!hmac_name) {
pr_warn("Invalid key hash id %d\n", key->hash);
return ERR_PTR(-EINVAL);
}
key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(key_tfm))
return (u8 *)key_tfm;
shash = kmalloc(sizeof(struct shash_desc) +
crypto_shash_descsize(key_tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
goto out_free_key;
}
transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
if (!transformed_key) {
ret = -ENOMEM;
goto out_free_shash;
}
shash->tfm = key_tfm;
ret = crypto_shash_setkey(key_tfm, key->key, key->len);
if (ret < 0)
goto out_free_shash;
ret = crypto_shash_init(shash);
if (ret < 0)
goto out_free_shash;
ret = crypto_shash_update(shash, nqn, strlen(nqn));
if (ret < 0)
goto out_free_shash;
ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
if (ret < 0)
goto out_free_shash;
ret = crypto_shash_final(shash, transformed_key);
out_free_shash:
kfree(shash);
out_free_key:
crypto_free_shash(key_tfm);
if (ret < 0) {
kfree_sensitive(transformed_key);
return ERR_PTR(ret);
}
return transformed_key;
}
EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
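/*
* Note: 'secret' is the full "DHHC-1:XX:<base64>:" string; the hash id
* parsed from the prefix is stored in the key and later selects the
* transformation applied by nvme_auth_transform_key().
*/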
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
{
struct nvme_dhchap_key *key;
u8 key_hash;
if (!secret) {
*ret_key = NULL;
return 0;
}
if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
return -EINVAL;
/* Pass in the secret without the 'DHHC-1:XX:' prefix */
key = nvme_auth_extract_key(secret + 10, key_hash);
if (IS_ERR(key)) {
*ret_key = NULL;
return PTR_ERR(key);
}
*ret_key = key;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
MODULE_LICENSE("GPL v2");
@@ -92,6 +92,19 @@ config NVME_TCP
If unsure, say N.
config NVME_AUTH
bool "NVM Express over Fabrics In-Band Authentication"
depends on NVME_CORE
select NVME_COMMON
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA256
select CRYPTO_SHA512
help
This provides support for NVMe over Fabrics In-Band Authentication.
If unsure, say N.
config NVME_APPLE
tristate "Apple ANS2 NVM Express host driver"
depends on OF && BLOCK
......
@@ -16,6 +16,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
......
This diff is collapsed.
@@ -24,6 +24,7 @@
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -330,6 +331,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +339,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
return AUTHENTICATE;
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
@@ -375,11 +380,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
if (ctrl->kas)
ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +398,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
case AUTHENTICATE:
#ifdef CONFIG_NVME_AUTH
queue_work(nvme_wq, &ctrl->dhchap_auth_work);
nvme_retry_req(req);
#else
nvme_end_req(req);
#endif
return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -702,7 +717,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -3609,6 +3626,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
#ifdef CONFIG_NVME_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct nvmf_ctrl_options *opts = ctrl->opts;
if (!opts->dhchap_secret)
return sysfs_emit(buf, "none\n");
return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}
static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct nvmf_ctrl_options *opts = ctrl->opts;
char *dhchap_secret;
if (!ctrl->opts->dhchap_secret)
return -EINVAL;
if (count < 7)
return -EINVAL;
if (memcmp(buf, "DHHC-1:", 7))
return -EINVAL;
dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
if (!dhchap_secret)
return -ENOMEM;
memcpy(dhchap_secret, buf, count);
nvme_auth_stop(ctrl);
if (strcmp(dhchap_secret, opts->dhchap_secret)) {
int ret;
ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
if (ret) {
kfree(dhchap_secret);
return ret;
}
kfree(opts->dhchap_secret);
opts->dhchap_secret = dhchap_secret;
/* Key has changed; re-authenticate with the new key */
nvme_auth_reset(ctrl);
} else {
kfree(dhchap_secret);
}
/* Start re-authentication */
dev_info(ctrl->device, "re-authenticating controller\n");
queue_work(nvme_wq, &ctrl->dhchap_auth_work);
return count;
}
static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct nvmf_ctrl_options *opts = ctrl->opts;
if (!opts->dhchap_ctrl_secret)
return sysfs_emit(buf, "none\n");
return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}
static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
struct nvmf_ctrl_options *opts = ctrl->opts;
char *dhchap_secret;
if (!ctrl->opts->dhchap_ctrl_secret)
return -EINVAL;
if (count < 7)
return -EINVAL;
if (memcmp(buf, "DHHC-1:", 7))
return -EINVAL;
dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
if (!dhchap_secret)
return -ENOMEM;
memcpy(dhchap_secret, buf, count);
nvme_auth_stop(ctrl);
if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
int ret;
ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
if (ret) {
kfree(dhchap_secret);
return ret;
}
kfree(opts->dhchap_ctrl_secret);
opts->dhchap_ctrl_secret = dhchap_secret;
/* Key has changed; re-authenticate with the new key */
nvme_auth_reset(ctrl);
} else {
kfree(dhchap_secret);
}
/* Start re-authentication */
dev_info(ctrl->device, "re-authenticating controller\n");
queue_work(nvme_wq, &ctrl->dhchap_auth_work);
return count;
}
static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3632,6 +3751,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_AUTH
&dev_attr_dhchap_secret.attr,
&dev_attr_dhchap_ctrl_secret.attr,
#endif
NULL
};
@@ -3655,6 +3778,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
#ifdef CONFIG_NVME_AUTH
if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
return 0;
if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
return 0;
#endif
return a->mode;
}
@@ -4548,8 +4677,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
}
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4613,6 +4744,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4672,6 +4804,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
nvme_auth_stop(ctrl);
nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4762,6 +4896,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
......
@@ -369,6 +369,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -401,8 +402,25 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
goto out_free_data;
}
result = le32_to_cpu(res.u32);
ctrl->cntlid = result & 0xFFFF;
if ((result >> 16) & 0x3) {
/* Authentication required */
ret = nvme_auth_negotiate(ctrl, 0);
if (ret) {
dev_warn(ctrl->device,
"qid 0: authentication setup failed\n");
ret = NVME_SC_AUTH_REQUIRED;
goto out_free_data;
}
ret = nvme_auth_wait(ctrl, 0);
if (ret)
dev_warn(ctrl->device,
"qid 0: authentication failed\n");
else
dev_info(ctrl->device,
"qid 0: authenticated\n");
}
out_free_data:
kfree(data);
return ret;
@@ -435,6 +453,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -460,6 +479,21 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
result = le32_to_cpu(res.u32);
if ((result >> 16) & 2) {
/* Authentication required */
ret = nvme_auth_negotiate(ctrl, qid);
if (ret) {
dev_warn(ctrl->device,
"qid %d: authentication setup failed\n", qid);
ret = NVME_SC_AUTH_REQUIRED;
} else {
ret = nvme_auth_wait(ctrl, qid);
if (ret)
dev_warn(ctrl->device,
"qid %u: authentication failed\n", qid);
}
}
kfree(data);
return ret;
}
@@ -552,6 +586,8 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -833,6 +869,34 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
case NVMF_OPT_DHCHAP_SECRET:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
pr_err("Invalid DH-CHAP secret %s\n", p);
ret = -EINVAL;
goto out;
}
kfree(opts->dhchap_secret);
opts->dhchap_secret = p;
break;
case NVMF_OPT_DHCHAP_CTRL_SECRET:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
pr_err("Invalid DH-CHAP secret %s\n", p);
ret = -EINVAL;
goto out;
}
kfree(opts->dhchap_ctrl_secret);
opts->dhchap_ctrl_secret = p;
break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -951,6 +1015,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts->host_iface);
kfree(opts->dhchap_secret);
kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -960,7 +1026,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1196,7 +1263,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
......
@@ -68,6 +68,8 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
NVMF_OPT_DHCHAP_SECRET = 1 << 23,
NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -97,6 +99,9 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
* @dhchap_secret: DH-HMAC-CHAP secret
* @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
* authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
char *dhchap_secret;
char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
......
@@ -328,6 +328,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
#ifdef CONFIG_NVME_AUTH
struct work_struct dhchap_auth_work;
struct list_head dhchap_auth_list;
struct mutex dhchap_auth_mutex;
struct nvme_dhchap_key *host_key;
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
#endif
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -992,6 +1001,27 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
#ifdef CONFIG_NVME_AUTH
void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_reset(struct nvme_ctrl *ctrl);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
......
@@ -1205,6 +1205,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
......
@@ -2173,6 +2173,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
......
@@ -287,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
u8 spsp0 = spc[1];
u8 spsp1 = spc[2];
u8 secp = spc[3];
u32 tl = get_unaligned_le32(spc + 4);
trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
spsp0, spsp1, secp, tl);
trace_seq_putc(p, 0);
return ret;
}
static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
u8 spsp0 = spc[1];
u8 spsp1 = spc[2];
u8 secp = spc[3];
u32 al = get_unaligned_le32(spc + 4);
trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
spsp0, spsp1, secp, al);
trace_seq_putc(p, 0);
return ret;
}
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
case nvme_fabrics_type_auth_send:
return nvme_trace_fabrics_auth_send(p, spc);
case nvme_fabrics_type_auth_receive:
return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
*/
#ifndef _NVME_AUTH_H
#define _NVME_AUTH_H
#include <crypto/kpp.h>
struct nvme_dhchap_key {
u8 *key;
size_t len;
u8 hash;
};
u32 nvme_auth_get_seqnum(void);
const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
const char *nvme_auth_hmac_name(u8 hmac_id);
const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key);
u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
#endif /* _NVME_AUTH_H */