Commit 95dddcb5 authored by Dan Williams

Merge branch 'for-6.2/cxl-security' into for-6.2/cxl

Pick CXL PMEM security commands for v6.2. Resolve conflicts with the
removal of the cxl_pmem_wq.
parents da8380bb d18bc74a
@@ -41,3 +41,17 @@ KernelVersion:  5.18
 Contact:        Kajol Jain <kjain@linux.ibm.com>
 Description:    (RO) This sysfs file exposes the cpumask which is designated
                 to retrieve nvdimm pmu event counter data.
+
+What:           /sys/bus/nd/devices/nmemX/cxl/id
+Date:           November 2022
+KernelVersion:  6.2
+Contact:        Dave Jiang <dave.jiang@intel.com>
+Description:    (RO) Shows the id (serial number) of the device. This
+                attribute is CXL specific.
+
+What:           /sys/bus/nd/devices/nmemX/cxl/provider
+Date:           November 2022
+KernelVersion:  6.2
+Contact:        Dave Jiang <dave.jiang@intel.com>
+Description:    (RO) Shows the CXL bridge device that ties a CXL memory
+                device to this NVDIMM device, i.e. the parent of the device
+                returned is a /sys/bus/cxl/devices/memX instance.
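For illustration (not part of this commit): a minimal user-space sketch that reads the two new attributes. The device name "nmem0" is an assumption; adjust for the target system.

/* Hedged sketch: read the new nmemX/cxl/{id,provider} attributes. */
#include <stdio.h>

static void show_attr(const char *path)
{
    char buf[64];
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);
        return;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("%s: %s", path, buf); /* sysfs value includes a newline */
    fclose(f);
}

int main(void)
{
    show_attr("/sys/bus/nd/devices/nmem0/cxl/id");       /* device serial */
    show_attr("/sys/bus/nd/devices/nmem0/cxl/provider"); /* e.g. a memX-child name */
    return 0;
}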
@@ -212,9 +212,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
     if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
         return -ENOTTY;

-    if (!cpu_cache_has_invalidate_memregion())
-        return -EINVAL;
-
     memcpy(nd_cmd.cmd.passphrase, key_data->data,
             sizeof(nd_cmd.cmd.passphrase));
     rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -229,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
         return -EIO;
     }

-    /* DIMM unlocked, invalidate all CPU caches before we read it */
-    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
     return 0;
 }
@@ -299,11 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
     if (!test_bit(cmd, &nfit_mem->dsm_mask))
         return -ENOTTY;

-    if (!cpu_cache_has_invalidate_memregion())
-        return -EINVAL;
-
-    /* flush all cache before we erase DIMM */
-    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
     memcpy(nd_cmd.cmd.passphrase, key->data,
             sizeof(nd_cmd.cmd.passphrase));
     rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -322,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
         return -ENXIO;
     }

-    /* DIMM erased, invalidate all CPU caches before we read it */
-    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
     return 0;
 }
@@ -346,9 +333,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
     if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
         return -ENOTTY;

-    if (!cpu_cache_has_invalidate_memregion())
-        return -EINVAL;
-
     rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
     if (rc < 0)
         return rc;
@@ -362,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
         return -ENXIO;
     }

-    /* flush all cache before we make the nvdimms available */
-    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
     return 0;
 }
@@ -388,11 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
     if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
         return -ENOTTY;

-    if (!cpu_cache_has_invalidate_memregion())
-        return -EINVAL;
-
-    /* flush all cache before we erase DIMM */
-    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
     memcpy(nd_cmd.cmd.passphrase, nkey->data,
             sizeof(nd_cmd.cmd.passphrase));
     rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -770,5 +747,3 @@ static const struct nvdimm_fw_ops __intel_fw_ops = {
 };

 const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
-
-MODULE_IMPORT_NS(DEVMEM);
@@ -111,4 +111,22 @@ config CXL_REGION
     select MEMREGION
     select GET_FREE_REGION

+config CXL_REGION_INVALIDATION_TEST
+    bool "CXL: Region Cache Management Bypass (TEST)"
+    depends on CXL_REGION
+    help
+      CXL region management and security operations potentially invalidate
+      the content of CPU caches without notifying those caches to
+      invalidate the affected cachelines. The CXL region driver attempts
+      to invalidate caches when those events occur. If that invalidation
+      fails, the region will fail to enable. Cache invalidation can fail
+      when the CPU does not provide an invalidation mechanism; for
+      example, use of wbinvd is restricted to bare-metal x86. For testing
+      purposes, toggling this option disables that data-integrity safety
+      and proceeds with enabling regions even when there may be
+      conflicting contents in the CPU cache.
+
+      If unsure, or if this kernel is meant for production environments,
+      say N.
+
 endif
@@ -9,5 +9,5 @@ obj-$(CONFIG_CXL_PORT) += cxl_port.o
 cxl_mem-y := mem.o
 cxl_pci-y := pci.o
 cxl_acpi-y := acpi.o
-cxl_pmem-y := pmem.o
+cxl_pmem-y := pmem.o security.o
 cxl_port-y := port.o
@@ -65,6 +65,12 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
     CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
     CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
     CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
+    CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0),
+    CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0),
+    CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0),
+    CXL_CMD(FREEZE_SECURITY, 0, 0, 0),
+    CXL_CMD(UNLOCK, 0x20, 0, 0),
+    CXL_CMD(PASSPHRASE_SECURE_ERASE, 0x40, 0, 0),
 };

 /*
@@ -698,6 +704,16 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
         rc = 0;
     }

+    /*
+     * Set up permanently kernel-exclusive commands, i.e. those whose
+     * mechanism is driven through sysfs, keyctl, etc.
+     */
+    set_bit(CXL_MEM_COMMAND_ID_SET_PASSPHRASE, cxlds->exclusive_cmds);
+    set_bit(CXL_MEM_COMMAND_ID_DISABLE_PASSPHRASE, cxlds->exclusive_cmds);
+    set_bit(CXL_MEM_COMMAND_ID_UNLOCK, cxlds->exclusive_cmds);
+    set_bit(CXL_MEM_COMMAND_ID_PASSPHRASE_SECURE_ERASE,
+        cxlds->exclusive_cmds);
+
 out:
     kvfree(gsl);
     return rc;
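For illustration (not part of this commit): the exclusive_cmds bitmap set up above is what lets the mailbox ioctl path refuse user-space submission of these commands. A minimal stand-alone model of that gate, assuming the kernel's refusal maps to -EBUSY:

/* User-space model of the kernel-exclusive command gate; command ids,
 * the bitmap, and the -EBUSY errno are assumptions for illustration. */
#include <errno.h>
#include <stdio.h>

enum { CMD_SET_PASSPHRASE = 1, CMD_UNLOCK = 2, CMD_GET_SECURITY_STATE = 3 };

static unsigned long exclusive_cmds;

static int submit_user_command(int cmd_id)
{
    if (exclusive_cmds & (1UL << cmd_id))
        return -EBUSY; /* reserved for in-kernel users (sysfs/keyctl) */
    return 0; /* would be forwarded to the device mailbox */
}

int main(void)
{
    exclusive_cmds |= 1UL << CMD_SET_PASSPHRASE;
    exclusive_cmds |= 1UL << CMD_UNLOCK;

    printf("SET_PASSPHRASE: %d\n", submit_user_command(CMD_SET_PASSPHRASE));         /* -16 */
    printf("GET_SECURITY_STATE: %d\n", submit_user_command(CMD_GET_SECURITY_STATE)); /* 0 */
    return 0;
}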
......
@@ -216,6 +216,13 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
     dev->parent = &cxlmd->dev;
     dev->bus = &cxl_bus_type;
     dev->type = &cxl_nvdimm_type;
+    /*
+     * A "%llx" string is at most 17 bytes (16 hex digits plus NUL),
+     * while dimm_id may hold at most NVDIMM_KEY_DESC_LEN bytes.
+     */
+    BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
+             sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
+    sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);

     return cxl_nvd;
 }
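For illustration (not part of this commit): the 17-byte bound in the comment above follows from a u64 rendered as hex, at most 16 digits plus a terminating NUL. A stand-alone user-space check:

/* Illustrative check of the 17-byte "%llx" bound used above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    char buf[17];
    int n = snprintf(buf, sizeof(buf), "%llx",
             (unsigned long long)UINT64_MAX);

    printf("%d chars: %s\n", n, buf); /* prints "16 chars: ffffffffffffffff" */
    return 0;
}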
......
@@ -1403,6 +1403,8 @@ static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
         goto out;
     down_read(&cxl_dpa_rwsem);
     rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+    if (rc == 0)
+        set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
     up_read(&cxl_dpa_rwsem);
     up_write(&cxl_region_rwsem);
 out:
@@ -1958,6 +1960,30 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
     return rc;
 }

+static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+{
+    if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
+        return 0;
+
+    if (!cpu_cache_has_invalidate_memregion()) {
+        if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+            dev_warn(&cxlr->dev,
+                 "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+            clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+            return 0;
+        } else {
+            dev_err(&cxlr->dev,
+                "Failed to synchronize CPU cache state\n");
+            return -ENXIO;
+        }
+    }
+
+    cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+    clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+    return 0;
+}
+
 static int cxl_region_probe(struct device *dev)
 {
     struct cxl_region *cxlr = to_cxl_region(dev);
@@ -1973,14 +1999,21 @@ static int cxl_region_probe(struct device *dev)
     if (p->state < CXL_CONFIG_COMMIT) {
         dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
         rc = -ENXIO;
+        goto out;
     }

+    rc = cxl_region_invalidate_memregion(cxlr);
+
     /*
      * From this point on any path that changes the region's state away from
      * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
      */
+out:
     up_read(&cxl_region_rwsem);

+    if (rc)
+        return rc;
+
     switch (cxlr->mode) {
     case CXL_DECODER_PMEM:
         return devm_cxl_add_pmem_region(cxlr);
@@ -2008,4 +2041,5 @@ void cxl_region_exit(void)
 }

 MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS(DEVMEM);
 MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
@@ -388,6 +388,12 @@ struct cxl_region_params {
     int nr_targets;
 };

+/*
+ * Flag whether this region needs to have its HPA span synchronized with
+ * CPU cache state at region activation time.
+ */
+#define CXL_REGION_F_INCOHERENT 0
+
 /**
  * struct cxl_region - CXL region
  * @dev: This region's device
@@ -396,6 +402,7 @@ struct cxl_region_params {
  * @type: Endpoint decoder target type
  * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
  * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
+ * @flags: Region state flags
  * @params: active + config params for the region
  */
 struct cxl_region {
@@ -405,6 +412,7 @@ struct cxl_region {
     enum cxl_decoder_type type;
     struct cxl_nvdimm_bridge *cxl_nvb;
     struct cxl_pmem_region *cxlr_pmem;
+    unsigned long flags;
     struct cxl_region_params params;
 };

@@ -416,9 +424,12 @@ struct cxl_nvdimm_bridge {
     struct nvdimm_bus_descriptor nd_desc;
 };

+#define CXL_DEV_ID_LEN 19
+
 struct cxl_nvdimm {
     struct device dev;
     struct cxl_memdev *cxlmd;
+    u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
 };

 struct cxl_pmem_region_mapping {
......
@@ -288,6 +288,12 @@ enum cxl_opcode {
     CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
     CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
     CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+    CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500,
+    CXL_MBOX_OP_SET_PASSPHRASE = 0x4501,
+    CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502,
+    CXL_MBOX_OP_UNLOCK = 0x4503,
+    CXL_MBOX_OP_FREEZE_SECURITY = 0x4504,
+    CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE = 0x4505,
     CXL_MBOX_OP_MAX = 0x10000
 };
@@ -387,6 +393,41 @@ struct cxl_mem_command {
 #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
 };

+#define CXL_PMEM_SEC_STATE_USER_PASS_SET    0x01
+#define CXL_PMEM_SEC_STATE_MASTER_PASS_SET  0x02
+#define CXL_PMEM_SEC_STATE_LOCKED           0x04
+#define CXL_PMEM_SEC_STATE_FROZEN           0x08
+#define CXL_PMEM_SEC_STATE_USER_PLIMIT      0x10
+#define CXL_PMEM_SEC_STATE_MASTER_PLIMIT    0x20
+
+/* set passphrase input payload */
+struct cxl_set_pass {
+    u8 type;
+    u8 reserved[31];
+    /* CXL field using NVDIMM define, same length */
+    u8 old_pass[NVDIMM_PASSPHRASE_LEN];
+    u8 new_pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+/* disable passphrase input payload */
+struct cxl_disable_pass {
+    u8 type;
+    u8 reserved[31];
+    u8 pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+/* passphrase secure erase payload */
+struct cxl_pass_erase {
+    u8 type;
+    u8 reserved[31];
+    u8 pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+enum {
+    CXL_PMEM_SEC_PASS_MASTER = 0,
+    CXL_PMEM_SEC_PASS_USER,
+};
+
 int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
               size_t in_size, void *out, size_t out_size);
 int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
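For illustration (not part of this commit): the payload structs above pack a 1-byte type, 31 reserved bytes, then 32-byte passphrase fields. A stand-alone mirror of the Set Passphrase layout, assuming NVDIMM_PASSPHRASE_LEN is 32 as defined by libnvdimm:

/* Stand-alone layout check for the Set Passphrase input payload. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NVDIMM_PASSPHRASE_LEN 32

struct cxl_set_pass {
    unsigned char type;
    unsigned char reserved[31];
    unsigned char old_pass[NVDIMM_PASSPHRASE_LEN];
    unsigned char new_pass[NVDIMM_PASSPHRASE_LEN];
} __attribute__((packed));

int main(void)
{
    /* 1 type byte + 31 reserved + two 32-byte passphrases = 96 bytes */
    assert(sizeof(struct cxl_set_pass) == 96);
    assert(offsetof(struct cxl_set_pass, old_pass) == 32);
    assert(offsetof(struct cxl_set_pass, new_pass) == 64);
    printf("Set Passphrase payload: %zu bytes\n", sizeof(struct cxl_set_pass));
    return 0;
}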
......
@@ -11,6 +11,8 @@
 #include "cxlmem.h"
 #include "cxl.h"

+extern const struct nvdimm_security_ops *cxl_security_ops;
+
 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

 static void clear_exclusive(void *cxlds)
@@ -23,6 +25,41 @@ static void unregister_nvdimm(void *nvdimm)
     nvdimm_delete(nvdimm);
 }

+static ssize_t provider_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+    struct nvdimm *nvdimm = to_nvdimm(dev);
+    struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+    return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
+}
+static DEVICE_ATTR_RO(provider);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+    struct nvdimm *nvdimm = to_nvdimm(dev);
+    struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+    struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
+
+    return sysfs_emit(buf, "%lld\n", cxlds->serial);
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *cxl_dimm_attributes[] = {
+    &dev_attr_id.attr,
+    &dev_attr_provider.attr,
+    NULL
+};
+
+static const struct attribute_group cxl_dimm_attribute_group = {
+    .name = "cxl",
+    .attrs = cxl_dimm_attributes,
+};
+
+static const struct attribute_group *cxl_dimm_attribute_groups[] = {
+    &cxl_dimm_attribute_group,
+    NULL
+};
+
 static int cxl_nvdimm_probe(struct device *dev)
 {
     struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -42,8 +79,10 @@ static int cxl_nvdimm_probe(struct device *dev)
     set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
     set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
     set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
-    nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
-                   cmd_mask, 0, NULL);
+    nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
+                 cxl_dimm_attribute_groups, flags,
+                 cmd_mask, 0, NULL, cxl_nvd->dev_id,
+                 cxl_security_ops, NULL);

     if (!nvdimm)
         return -ENOMEM;
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"
static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
unsigned long security_flags = 0;
u32 sec_out;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0,
&sec_out, sizeof(sec_out));
if (rc < 0)
return 0;
if (ptype == NVDIMM_MASTER) {
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
else
set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
return security_flags;
}
if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
else
set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
} else {
set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
}
return security_flags;
}
static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
const struct nvdimm_key_data *old_data,
const struct nvdimm_key_data *new_data,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_set_pass set_pass;
int rc;
set_pass.type = ptype == NVDIMM_MASTER ?
CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE,
&set_pass, sizeof(set_pass), NULL, 0);
return rc;
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_disable_pass dis_pass;
int rc;
dis_pass.type = ptype == NVDIMM_MASTER ?
CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE,
&dis_pass, sizeof(dis_pass), NULL, 0);
return rc;
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
}
static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
}
static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
u8 pass[NVDIMM_PASSPHRASE_LEN];
int rc;
memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
if (rc < 0)
return rc;
return 0;
}
static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_pass_erase erase;
int rc;
erase.type = ptype == NVDIMM_MASTER ?
CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
&erase, sizeof(erase), NULL, 0);
if (rc < 0)
return rc;
return 0;
}
static const struct nvdimm_security_ops __cxl_security_ops = {
.get_flags = cxl_pmem_get_security_flags,
.change_key = cxl_pmem_security_change_key,
.disable = cxl_pmem_security_disable,
.freeze = cxl_pmem_security_freeze,
.unlock = cxl_pmem_security_unlock,
.erase = cxl_pmem_security_passphrase_erase,
.disable_master = cxl_pmem_security_disable_master,
};
const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;
@@ -114,4 +114,16 @@ config NVDIMM_TEST_BUILD
       core devm_memremap_pages() implementation and other
       infrastructure.

+config NVDIMM_SECURITY_TEST
+    bool "Enable NVDIMM security unit tests"
+    depends on NVDIMM_KEYS
+    help
+      The NVDIMM and CXL subsystems support unit testing of their device
+      security state machines. The NVDIMM_SECURITY_TEST option disables
+      CPU cache maintenance operations around events like secure erase
+      and overwrite. Also, when enabled, the NVDIMM subsystem core helps
+      the unit test implement a mock state machine.
+
+      Select N if unsure.
+
 endif
@@ -349,11 +349,18 @@ static ssize_t available_slots_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(available_slots);

-__weak ssize_t security_show(struct device *dev,
+ssize_t security_show(struct device *dev,
         struct device_attribute *attr, char *buf)
 {
     struct nvdimm *nvdimm = to_nvdimm(dev);

+    /*
+     * For the test version we need to poll the "hardware" in order
+     * to get the updated status for unlock testing.
+     */
+    if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
+        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+
     if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
         return sprintf(buf, "overwrite\n");
     if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
......
@@ -2,6 +2,7 @@
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  */
+#include <linux/memregion.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -100,6 +101,16 @@ static void nd_region_remove(struct device *dev)
      */
     sysfs_put(nd_region->bb_state);
     nd_region->bb_state = NULL;
+
+    /*
+     * Try to flush caches here since a disabled region may be subject to
+     * secure erase while disabled, and previous dirty data should not be
+     * written back to a new instance of the region. This only matters on
+     * bare metal where security commands are available, so silent failure
+     * here is ok.
+     */
+    if (cpu_cache_has_invalidate_memregion())
+        cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
 }

 static int child_notify(struct device *dev, void *data)
......
@@ -59,9 +59,51 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
     return 0;
 }

+static int nd_region_invalidate_memregion(struct nd_region *nd_region)
+{
+    int i, incoherent = 0;
+
+    for (i = 0; i < nd_region->ndr_mappings; i++) {
+        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+        struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+        if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
+            incoherent++;
+            break;
+        }
+    }
+
+    if (!incoherent)
+        return 0;
+
+    if (!cpu_cache_has_invalidate_memregion()) {
+        if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
+            dev_warn(&nd_region->dev,
+                 "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+            goto out;
+        } else {
+            dev_err(&nd_region->dev,
+                "Failed to synchronize CPU cache state\n");
+            return -ENXIO;
+        }
+    }
+
+    cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+out:
+    for (i = 0; i < nd_region->ndr_mappings; i++) {
+        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+        struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+        clear_bit(NDD_INCOHERENT, &nvdimm->flags);
+    }
+
+    return 0;
+}
+
 int nd_region_activate(struct nd_region *nd_region)
 {
-    int i, j, num_flush = 0;
+    int i, j, rc, num_flush = 0;
     struct nd_region_data *ndrd;
     struct device *dev = &nd_region->dev;
     size_t flush_data_size = sizeof(void *);
@@ -85,6 +127,10 @@ int nd_region_activate(struct nd_region *nd_region)
     }
     nvdimm_bus_unlock(&nd_region->dev);

+    rc = nd_region_invalidate_memregion(nd_region);
+    if (rc)
+        return rc;
+
     ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
     if (!ndrd)
         return -ENOMEM;
@@ -1222,3 +1268,5 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,

     return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
 }
+
+MODULE_IMPORT_NS(DEVMEM);
@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
         || !nvdimm->sec.flags)
         return -EIO;

+    /* cxl_test needs this to pre-populate the security state */
+    if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
+        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+
     /* No need to go further if security is disabled */
     if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
         return 0;
@@ -204,6 +208,8 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
     rc = nvdimm->sec.ops->unlock(nvdimm, data);
     dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
             rc == 0 ? "success" : "fail");
+    if (rc == 0)
+        set_bit(NDD_INCOHERENT, &nvdimm->flags);

     nvdimm_put_key(key);
     nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -239,7 +245,8 @@ static int check_security_state(struct nvdimm *nvdimm)
     return 0;
 }

-static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
+static int security_disable(struct nvdimm *nvdimm, unsigned int keyid,
+        enum nvdimm_passphrase_type pass_type)
 {
     struct device *dev = &nvdimm->dev;
     struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -250,8 +257,13 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
     /* The bus lock should be held at the top level of the call stack */
     lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

-    if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
-            || !nvdimm->sec.flags)
+    if (!nvdimm->sec.ops || !nvdimm->sec.flags)
+        return -EOPNOTSUPP;
+
+    if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable)
+        return -EOPNOTSUPP;
+
+    if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master)
         return -EOPNOTSUPP;

     rc = check_security_state(nvdimm);
@@ -263,12 +275,21 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
     if (!data)
         return -ENOKEY;

-    rc = nvdimm->sec.ops->disable(nvdimm, data);
-    dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
-            rc == 0 ? "success" : "fail");
+    if (pass_type == NVDIMM_MASTER) {
+        rc = nvdimm->sec.ops->disable_master(nvdimm, data);
+        dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key),
+                rc == 0 ? "success" : "fail");
+    } else {
+        rc = nvdimm->sec.ops->disable(nvdimm, data);
+        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
+                rc == 0 ? "success" : "fail");
+    }

     nvdimm_put_key(key);
-    nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+    if (pass_type == NVDIMM_MASTER)
+        nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+    else
+        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
     return rc;
 }

@@ -355,6 +376,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
         return -ENOKEY;

     rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
+    if (rc == 0)
+        set_bit(NDD_INCOHERENT, &nvdimm->flags);
     dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
             pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
             rc == 0 ? "success" : "fail");
@@ -389,6 +412,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
         return -ENOKEY;

     rc = nvdimm->sec.ops->overwrite(nvdimm, data);
+    if (rc == 0)
+        set_bit(NDD_INCOHERENT, &nvdimm->flags);
     dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
             rc == 0 ? "success" : "fail");
@@ -473,6 +498,7 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
 #define OPS \
     C( OP_FREEZE,          "freeze",         1), \
     C( OP_DISABLE,         "disable",        2), \
+    C( OP_DISABLE_MASTER,  "disable_master", 2), \
     C( OP_UPDATE,          "update",         3), \
     C( OP_ERASE,           "erase",          2), \
     C( OP_OVERWRITE,       "overwrite",      2), \
@@ -524,7 +550,10 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
         rc = nvdimm_security_freeze(nvdimm);
     } else if (i == OP_DISABLE) {
         dev_dbg(dev, "disable %u\n", key);
-        rc = security_disable(nvdimm, key);
+        rc = security_disable(nvdimm, key, NVDIMM_USER);
+    } else if (i == OP_DISABLE_MASTER) {
+        dev_dbg(dev, "disable_master %u\n", key);
+        rc = security_disable(nvdimm, key, NVDIMM_MASTER);
     } else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
         dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
         rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
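For illustration (not part of this commit): the new "disable_master" op is driven through the nmemX security sysfs attribute parsed above. A hedged user-space sketch; the device name, path, and key serial are assumptions, and the key must reference a loaded 'encrypted' kernel key holding the master passphrase.

/* Hedged sketch: request disable_master via the nvdimm security attribute. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/sys/bus/nd/devices/nmem0/security", "w");

    if (!f) {
        perror("security");
        return 1;
    }
    fprintf(f, "disable_master 42\n"); /* op name + key serial */
    return fclose(f) ? 1 : 0;
}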
......
@@ -35,6 +35,11 @@ enum {
     NDD_WORK_PENDING = 4,
     /* dimm supports namespace labels */
     NDD_LABELING = 6,
+    /*
+     * dimm contents have changed, requiring invalidation of CPU caches
+     * prior to activation of a region that includes this device
+     */
+    NDD_INCOHERENT = 7,

     /* need to set a limit somewhere, but yes, this is likely overkill */
     ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -183,6 +188,8 @@ struct nvdimm_security_ops {
     int (*overwrite)(struct nvdimm *nvdimm,
             const struct nvdimm_key_data *key_data);
     int (*query_overwrite)(struct nvdimm *nvdimm);
+    int (*disable_master)(struct nvdimm *nvdimm,
+                  const struct nvdimm_key_data *key_data);
 };

 enum nvdimm_fwa_state {
......
@@ -41,6 +41,12 @@
     ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
     ___C(SCAN_MEDIA, "Scan Media"), \
     ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \
+    ___C(GET_SECURITY_STATE, "Get Security State"), \
+    ___C(SET_PASSPHRASE, "Set Passphrase"), \
+    ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \
+    ___C(FREEZE_SECURITY, "Freeze Security"), \
+    ___C(UNLOCK, "Unlock"), \
+    ___C(PASSPHRASE_SECURE_ERASE, "Passphrase Secure Erase"), \
     ___C(MAX, "invalid / last command")

 #define ___C(a, b) CXL_MEM_COMMAND_ID_##a
......
@@ -27,6 +27,7 @@ cxl_acpi-y += config_check.o
 obj-m += cxl_pmem.o

 cxl_pmem-y := $(CXL_SRC)/pmem.o
+cxl_pmem-y += $(CXL_SRC)/security.o
 cxl_pmem-y += config_check.o

 obj-m += cxl_port.o
......
@@ -79,7 +79,6 @@ libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
 libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
 libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
 libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
-libnvdimm-y += dimm_devs.o
 libnvdimm-y += libnvdimm_test.o
 libnvdimm-y += config_check.o
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright Intel Corp. 2018 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"
ssize_t security_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
/*
* For the test version we need to poll the "hardware" in order
* to get the updated status for unlock testing.
*/
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
return -ENOTTY;
}