Commit c1f0fcd8 authored by Linus Torvalds

Merge tag 'cxl-for-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dan Williams:
 "Compute Express Link (CXL) updates for 6.2.

  While it may seem backwards, the CXL update this time around includes
  some focus on CXL 1.x enabling, whereas the work to date had been done
  with CXL 2.0 (VH topologies) in mind.

  First-generation CXL can mostly be supported via BIOS, similar to DDR;
  however, it became clear that there are use cases for OS-native CXL
  error handling, and that some CXL 3.0 endpoint features can be
  deployed on CXL 1.x hosts (Restricted CXL Host (RCH) topologies). So,
  this update brings RCH topologies into the Linux CXL device model.

  In support of the ongoing CXL 2.0+ enabling, two new core kernel
  facilities are added.

  One is the ability for the kernel to flag collisions between userspace
  access to PCI configuration registers and kernel accesses. This is
  brought on by the PCIe Data-Object-Exchange (DOE) facility, a hardware
  mailbox over config-cycles.
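
  For illustration, a minimal sketch of how a driver claims a config
  window so that such collisions can be flagged (the helper name below
  is a placeholder; the call itself mirrors the DOE setup in the
  cxl_pci changes further down in this diff):

    #include <linux/pci.h>
    #include <linux/pci-doe.h>

    /* Mark a DOE mailbox's config-space window as kernel-owned. */
    static void claim_doe_region(struct pci_dev *pdev, int doe_cap_offset)
    {
            if (!pci_request_config_region_exclusive(pdev, doe_cap_offset,
                                                     PCI_DOE_CAP_SIZEOF,
                                                     dev_name(&pdev->dev)))
                    pci_err(pdev, "failed to exclude DOE registers\n");
    }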

  The other is a cpu_cache_invalidate_memregion() API that maps to
  wbinvd_on_all_cpus() on x86. To prevent abuse it is disabled in guest
  VMs and on architectures that do not yet support it. The CXL paths
  that need it, dynamic memory region creation and security commands
  (erase / unlock), are disabled when it is not present.
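
  A minimal consumer sketch (the helper name is hypothetical; the calls
  and the IORES_DESC_CXL usage mirror the region driver changes below):

    #include <linux/memregion.h>
    #include <linux/ioport.h>
    #include <linux/errno.h>

    /* Write back and invalidate CPU caches for CXL memory, if possible. */
    static int flush_cxl_caches(void)
    {
            if (!cpu_cache_has_invalidate_memregion())
                    return -ENXIO; /* e.g. guest VM or unsupported arch */
            return cpu_cache_invalidate_memregion(IORES_DESC_CXL);
    }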

  As for CXL 2.0+ enabling this cycle, the subsystem gains support for
  Persistent Memory Security commands, error handling in response to
  PCIe AER notifications, and support for the "XOR" host bridge
  interleave algorithm.
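
  As an illustration of how those security commands ride the reworked
  mailbox interface, a sketch patterned on the cxl_pmem_security_freeze()
  implementation added later in this series:

    #include "cxlmem.h"

    /* Submit the Freeze Security mailbox command to a CXL memdev. */
    static int freeze_security(struct cxl_dev_state *cxlds)
    {
            struct cxl_mbox_cmd mbox_cmd = {
                    .opcode = CXL_MBOX_OP_FREEZE_SECURITY,
            };

            return cxl_internal_send_cmd(cxlds, &mbox_cmd);
    }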

  Summary:

   - Add the cpu_cache_invalidate_memregion() API for cache flushing in
     response to physical memory reconfiguration, or memory-side data
     invalidation from operations like secure erase or memory-device
     unlock.

   - Add a facility for the kernel to warn about collisions between
     kernel and userspace access to PCI configuration registers

   - Add support for Restricted CXL Host (RCH) topologies (formerly CXL
     1.1)

   - Add handling and reporting of CXL errors reported via the PCIe AER
     mechanism

   - Add support for CXL Persistent Memory Security commands

   - Add support for the "XOR" algorithm for CXL host bridge interleave

   - Rework / simplify CXL to NVDIMM interactions

   - Miscellaneous cleanups and fixes"

* tag 'cxl-for-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (71 commits)
  cxl/region: Fix memdev reuse check
  cxl/pci: Remove endian confusion
  cxl/pci: Add some type-safety to the AER trace points
  cxl/security: Drop security command ioctl uapi
  cxl/mbox: Add variable output size validation for internal commands
  cxl/mbox: Enable cxl_mbox_send_cmd() users to validate output size
  cxl/security: Fix Get Security State output payload endian handling
  cxl: update names for interleave ways conversion macros
  cxl: update names for interleave granularity conversion macros
  cxl/acpi: Warn about an invalid CHBCR in an existing CHBS entry
  tools/testing/cxl: Require cache invalidation bypass
  cxl/acpi: Fail decoder add if CXIMS for HBIG is missing
  cxl/region: Fix spelling mistake "memergion" -> "memregion"
  cxl/regs: Fix sparse warning
  cxl/acpi: Set ACPI's CXL _OSC to indicate RCD mode support
  tools/testing/cxl: Add an RCH topology
  cxl/port: Add RCD endpoint port enumeration
  cxl/mem: Move devm_cxl_add_endpoint() from cxl_core to cxl_mem
  tools/testing/cxl: Add XOR Math support to cxl_test
  cxl/acpi: Support CXL XOR Interleave Math (CXIMS)
  ...
parents 691806e9 f04facfb
......@@ -41,3 +41,17 @@ KernelVersion: 5.18
Contact: Kajol Jain <kjain@linux.ibm.com>
Description: (RO) This sysfs file exposes the cpumask which is designated
to retrieve nvdimm pmu event counter data.
What: /sys/bus/nd/devices/nmemX/cxl/id
Date: November 2022
KernelVersion: 6.2
Contact: Dave Jiang <dave.jiang@intel.com>
Description: (RO) Shows the id (serial) of the device. This is CXL specific.
What: /sys/bus/nd/devices/nmemX/cxl/provider
Date: November 2022
KernelVersion: 6.2
Contact: Dave Jiang <dave.jiang@intel.com>
Description: (RO) Shows the CXL bridge device that ties a CXL memory device
to this NVDIMM device, i.e. the parent of the device returned is
a /sys/bus/cxl/devices/memX instance.
......@@ -83,6 +83,7 @@ This structure has the form::
int (*mmio_enabled)(struct pci_dev *dev);
int (*slot_reset)(struct pci_dev *dev);
void (*resume)(struct pci_dev *dev);
void (*cor_error_detected)(struct pci_dev *dev);
};
The possible channel states are::
......@@ -422,5 +423,11 @@ That is, the recovery API only requires that:
- drivers/net/cxgb3
- drivers/net/s2io.c
The cor_error_detected() callback is invoked in handle_error_source() when
the error severity is "correctable". The callback is optional and allows
additional logging to be done if desired. See example:
- drivers/cxl/pci.c
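
A minimal sketch of a driver that opts into only the new callback (the
handler and struct names here are placeholders, not taken from the
patch)::

	static void my_cor_error_detected(struct pci_dev *pdev)
	{
		/* informational only; no recovery action is required */
		pci_info(pdev, "platform reported a corrected error\n");
	}

	static const struct pci_error_handlers my_err_handlers = {
		.cor_error_detected = my_cor_error_detected,
	};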
The End
-------
......@@ -69,6 +69,7 @@ config X86
select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
......@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#include <linux/memregion.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
......@@ -330,6 +331,23 @@ void arch_invalidate_pmem(void *addr, size_t size)
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
bool cpu_cache_has_invalidate_memregion(void)
{
return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR);
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, DEVMEM);
int cpu_cache_invalidate_memregion(int res_desc)
{
if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
return -ENXIO;
wbinvd_on_all_cpus();
return 0;
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, DEVMEM);
#endif
static void __cpa_flush_all(void *arg)
{
unsigned long cache = (unsigned long)arg;
......@@ -3,6 +3,7 @@
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <linux/memregion.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"
......@@ -190,8 +191,6 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
}
}
static void nvdimm_invalidate_cache(void);
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
......@@ -227,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
return -EIO;
}
/* DIMM unlocked, invalidate all CPU caches before we read it */
nvdimm_invalidate_cache();
return 0;
}
......@@ -297,8 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
if (!test_bit(cmd, &nfit_mem->dsm_mask))
return -ENOTTY;
/* flush all cache before we erase DIMM */
nvdimm_invalidate_cache();
memcpy(nd_cmd.cmd.passphrase, key->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
......@@ -317,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
return -ENXIO;
}
/* DIMM erased, invalidate all CPU caches before we read it */
nvdimm_invalidate_cache();
return 0;
}
......@@ -354,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
return -ENXIO;
}
/* flush all cache before we make the nvdimms available */
nvdimm_invalidate_cache();
return 0;
}
......@@ -380,8 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
return -ENOTTY;
/* flush all cache before we erase DIMM */
nvdimm_invalidate_cache();
memcpy(nd_cmd.cmd.passphrase, nkey->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
......@@ -401,22 +389,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
}
}
/*
* TODO: define a cross arch wbinvd equivalent when/if
* NVDIMM_FAMILY_INTEL command support arrives on another arch.
*/
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif
static const struct nvdimm_security_ops __intel_security_ops = {
.get_flags = intel_security_flags,
.freeze = intel_security_freeze,
......@@ -493,6 +493,7 @@ static u32 calculate_cxl_support(void)
u32 support;
support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT;
support |= OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT;
if (pci_aer_available())
support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT;
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
......@@ -111,4 +111,22 @@ config CXL_REGION
select MEMREGION
select GET_FREE_REGION
config CXL_REGION_INVALIDATION_TEST
bool "CXL: Region Cache Management Bypass (TEST)"
depends on CXL_REGION
help
CXL Region management and security operations potentially invalidate
the content of CPU caches without notifying those caches to
invalidate the affected cachelines. The CXL Region driver attempts
to invalidate caches when those events occur. If that invalidation
fails the region will fail to enable. Cache invalidation can fail
when the CPU does not provide an invalidation mechanism; for
example, use of wbinvd is restricted to bare metal x86. For testing
purposes, toggling this option disables that data integrity safety
and proceeds with enabling regions when there might be conflicting
contents in the CPU cache.
If unsure, or if this kernel is meant for production environments,
say N.
endif
......@@ -9,5 +9,5 @@ obj-$(CONFIG_CXL_PORT) += cxl_port.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o
cxl_pmem-y := pmem.o security.o
cxl_port-y := port.o
......@@ -58,14 +58,6 @@ extern struct rw_semaphore cxl_dpa_rwsem;
bool is_switch_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
if (!port)
return NULL;
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
......@@ -82,18 +82,23 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(14, 12);
}
static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
void __iomem *crb)
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
struct cxl_component_regs *regs)
{
struct cxl_component_reg_map map;
struct cxl_register_map map = {
.resource = port->component_reg_phys,
.base = crb,
.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
};
cxl_probe_component_regs(&port->dev, crb, &map);
if (!map.hdm_decoder.valid) {
cxl_probe_component_regs(&port->dev, crb, &map.component_map);
if (!map.component_map.hdm_decoder.valid) {
dev_err(&port->dev, "HDM decoder registers invalid\n");
return IOMEM_ERR_PTR(-ENXIO);
return -ENXIO;
}
return crb + map.hdm_decoder.offset;
return cxl_map_component_regs(&port->dev, regs, &map,
BIT(CXL_CM_CAP_CAP_ID_HDM));
}
/**
......@@ -103,25 +108,25 @@ static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
struct device *dev = &port->dev;
void __iomem *crb, *hdm;
struct cxl_hdm *cxlhdm;
void __iomem *crb;
int rc;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
cxlhdm->port = port;
crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
CXL_COMPONENT_REG_BLOCK_SIZE);
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
if (!crb) {
dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO);
}
hdm = map_hdm_decoder_regs(port, crb);
if (IS_ERR(hdm))
return ERR_CAST(hdm);
cxlhdm->regs.hdm_decoder = hdm;
rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
iounmap(crb);
if (rc)
return ERR_PTR(rc);
parse_hdm_decoder_caps(cxlhdm);
if (cxlhdm->decoder_count == 0) {
......@@ -489,10 +494,10 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
* Input validation ensures these warns never fire, but otherwise
* suppress uninitialized variable usage warnings.
*/
if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
"invalid interleave_ways: %d\n", cxld->interleave_ways))
return;
if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
"invalid interleave_granularity: %d\n",
cxld->interleave_granularity))
return;
......@@ -736,7 +741,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
cxld->target_type = CXL_DECODER_EXPANDER;
}
rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
&cxld->interleave_ways);
if (rc) {
dev_warn(&port->dev,
......@@ -744,7 +749,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id, ctrl);
return rc;
}
rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
if (rc)
return rc;
......@@ -140,13 +140,9 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
}
/**
* cxl_mbox_send_cmd() - Send a mailbox command to a device.
* cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
* @cxlds: The device data for the operation
* @opcode: Opcode for the mailbox command.
* @in: The input payload for the mailbox command.
* @in_size: The length of the input payload
* @out: Caller allocated buffer for the output.
* @out_size: Expected size of output.
* @mbox_cmd: initialized command to execute
*
* Context: Any context.
* Return:
......@@ -161,40 +157,40 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
*/
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
size_t in_size, void *out, size_t out_size)
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *mbox_cmd)
{
const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
struct cxl_mbox_cmd mbox_cmd = {
.opcode = opcode,
.payload_in = in,
.size_in = in_size,
.size_out = out_size,
.payload_out = out,
};
size_t out_size, min_out;
int rc;
if (in_size > cxlds->payload_size || out_size > cxlds->payload_size)
if (mbox_cmd->size_in > cxlds->payload_size ||
mbox_cmd->size_out > cxlds->payload_size)
return -E2BIG;
rc = cxlds->mbox_send(cxlds, &mbox_cmd);
out_size = mbox_cmd->size_out;
min_out = mbox_cmd->min_out;
rc = cxlds->mbox_send(cxlds, mbox_cmd);
if (rc)
return rc;
if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
return cxl_mbox_cmd_rc2errno(&mbox_cmd);
if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
return cxl_mbox_cmd_rc2errno(mbox_cmd);
if (!out_size)
return 0;
/*
* Variable sized commands can't be validated and so it's up to the
* caller to do that if they wish.
* Variable sized output needs to at least satisfy the caller's
* minimum if not the fully requested size.
*/
if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
if (mbox_cmd.size_out != out_size)
if (min_out == 0)
min_out = out_size;
if (mbox_cmd->size_out < min_out)
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
......@@ -561,15 +557,25 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8
while (remaining) {
u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
struct cxl_mbox_get_log log = {
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_get_log log;
int rc;
log = (struct cxl_mbox_get_log) {
.uuid = *uuid,
.offset = cpu_to_le32(offset),
.length = cpu_to_le32(xfer_size)
.length = cpu_to_le32(xfer_size),
};
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
out, xfer_size);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_LOG,
.size_in = sizeof(log),
.payload_in = &log,
.size_out = xfer_size,
.payload_out = out,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0)
return rc;
......@@ -615,19 +621,27 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *ret;
struct cxl_mbox_cmd mbox_cmd;
int rc;
ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
cxlds->payload_size);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
.size_out = cxlds->payload_size,
.payload_out = ret,
/* At least the record number field must be valid */
.min_out = 2,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
}
return ret;
}
......@@ -697,7 +711,6 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
/* Found the required CEL */
rc = 0;
}
out:
kvfree(gsl);
return rc;
......@@ -719,11 +732,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_partition_info pi;
struct cxl_mbox_cmd mbox_cmd;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
&pi, sizeof(pi));
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
.size_out = sizeof(pi),
.payload_out = &pi,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc)
return rc;
......@@ -752,10 +769,15 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
struct cxl_mbox_cmd mbox_cmd;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
sizeof(id));
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_IDENTIFY,
.size_out = sizeof(id),
.payload_out = &id,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0)
return rc;
......@@ -344,6 +344,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
* needed as this is ordered with cdev_add() publishing the device.
*/
cxlmd->cxlds = cxlds;
cxlds->cxlmd = cxlmd;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
......@@ -54,16 +54,13 @@ static int match_add_dports(struct pci_dev *pdev, void *data)
dev_dbg(&port->dev, "failed to find component registers\n");
port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
cxl_regmap_to_base(pdev, &map));
dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
if (IS_ERR(dport)) {
ctx->error = PTR_ERR(dport);
return PTR_ERR(dport);
}
ctx->count++;
dev_dbg(&port->dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));
return 0;
}
......@@ -99,7 +99,6 @@ static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
dev = &cxl_nvb->dev;
cxl_nvb->port = port;
cxl_nvb->state = CXL_NVB_NEW;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
device_set_pm_not_required(dev);
......@@ -117,28 +116,7 @@ static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
static void unregister_nvb(void *_cxl_nvb)
{
struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
bool flush;
/*
* If the bridge was ever activated then there might be in-flight state
* work to flush. Once the state has been changed to 'dead' then no new
* work can be queued by user-triggered bind.
*/
device_lock(&cxl_nvb->dev);
flush = cxl_nvb->state != CXL_NVB_NEW;
cxl_nvb->state = CXL_NVB_DEAD;
device_unlock(&cxl_nvb->dev);
/*
* Even though the device core will trigger device_release_driver()
* before the unregister, it does not know about the fact that
* cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
release now and flush it before tearing down the nvdimm device
* hierarchy.
*/
device_release_driver(&cxl_nvb->dev);
if (flush)
flush_work(&cxl_nvb->state_work);
device_unregister(&cxl_nvb->dev);
}
......@@ -188,7 +166,6 @@ static void cxl_nvdimm_release(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
xa_destroy(&cxl_nvd->pmem_regions);
kfree(cxl_nvd);
}
......@@ -220,7 +197,8 @@ EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
static struct lock_class_key cxl_nvdimm_key;
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
......@@ -231,38 +209,78 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
dev = &cxl_nvd->dev;
cxl_nvd->cxlmd = cxlmd;
xa_init(&cxl_nvd->pmem_regions);
cxlmd->cxl_nvd = cxl_nvd;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
device_set_pm_not_required(dev);
dev->parent = &cxlmd->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_nvdimm_type;
/*
* A "%llx" string is 17-bytes vs dimm_id that is max
* NVDIMM_KEY_DESC_LEN
*/
BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);
return cxl_nvd;
}
static void cxl_nvd_unregister(void *dev)
static void cxl_nvd_unregister(void *_cxl_nvd)
{
device_unregister(dev);
struct cxl_nvdimm *cxl_nvd = _cxl_nvd;
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
/*
* Either the bridge is in ->remove() context under the device_lock(),
* or cxlmd_release_nvdimm() is cancelling the bridge's release action
* for @cxl_nvd and doing it itself (while manually holding the bridge
* lock).
*/
device_lock_assert(&cxl_nvb->dev);
cxl_nvd->cxlmd = NULL;
cxlmd->cxl_nvd = NULL;
device_unregister(&cxl_nvd->dev);
}
static void cxlmd_release_nvdimm(void *_cxlmd)
{
struct cxl_memdev *cxlmd = _cxlmd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
device_lock(&cxl_nvb->dev);
if (cxlmd->cxl_nvd)
devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister,
cxlmd->cxl_nvd);
device_unlock(&cxl_nvb->dev);
put_device(&cxl_nvb->dev);
}
/**
* devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
* @host: same host as @cxlmd
* @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
*
* Return: 0 on success negative error code on failure.
*/
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
int rc;
cxl_nvd = cxl_nvdimm_alloc(cxlmd);
if (IS_ERR(cxl_nvd))
return PTR_ERR(cxl_nvd);
cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
if (!cxl_nvb)
return -ENODEV;
cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
if (IS_ERR(cxl_nvd)) {
rc = PTR_ERR(cxl_nvd);
goto err_alloc;
}
cxlmd->cxl_nvb = cxl_nvb;
dev = &cxl_nvd->dev;
rc = dev_set_name(dev, "pmem%d", cxlmd->id);
......@@ -273,13 +291,34 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
if (rc)
goto err;
dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev));
dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));
/*
* The two actions below arrange for @cxl_nvd to be deleted when either
* the top-level PMEM bridge goes down, or the endpoint device goes
* through ->remove().
*/
device_lock(&cxl_nvb->dev);
if (cxl_nvb->dev.driver)
rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister,
cxl_nvd);
else
rc = -ENXIO;
device_unlock(&cxl_nvb->dev);
return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
if (rc)
goto err_alloc;
/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);
err:
put_device(dev);
err_alloc:
cxlmd->cxl_nvb = NULL;
cxlmd->cxl_nvd = NULL;
put_device(&cxl_nvb->dev);
return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
......@@ -324,7 +324,7 @@ static ssize_t interleave_ways_store(struct device *dev,
if (rc)
return rc;
rc = ways_to_cxl(val, &iw);
rc = ways_to_eiw(val, &iw);
if (rc)
return rc;
......@@ -391,7 +391,7 @@ static ssize_t interleave_granularity_store(struct device *dev,
if (rc)
return rc;
rc = granularity_to_cxl(val, &ig);
rc = granularity_to_eig(val, &ig);
if (rc)
return rc;
......@@ -1028,7 +1028,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
parent_iw = parent_cxld->interleave_ways;
}
rc = granularity_to_cxl(parent_ig, &peig);
rc = granularity_to_eig(parent_ig, &peig);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
dev_name(parent_port->uport),
......@@ -1036,7 +1036,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return rc;
}
rc = ways_to_cxl(parent_iw, &peiw);
rc = ways_to_eiw(parent_iw, &peiw);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
dev_name(parent_port->uport),
......@@ -1045,7 +1045,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
}
iw = cxl_rr->nr_targets;
rc = ways_to_cxl(iw, &eiw);
rc = ways_to_eiw(iw, &eiw);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
dev_name(port->uport), dev_name(&port->dev), iw);
......@@ -1065,7 +1065,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
eig = peig;
}
rc = cxl_to_granularity(eig, &ig);
rc = eig_to_granularity(eig, &ig);
if (rc) {
dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
dev_name(port->uport), dev_name(&port->dev),
......@@ -1226,7 +1226,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled_target;
struct cxl_memdev *cxlmd_target;
cxled_target = p->targets[pos];
cxled_target = p->targets[i];
if (!cxled_target)
continue;
......@@ -1403,6 +1403,8 @@ static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
goto out;
down_read(&cxl_dpa_rwsem);
rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
if (rc == 0)
set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
out:
......@@ -1812,6 +1814,7 @@ static struct lock_class_key cxl_pmem_region_key;
static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;
struct device *dev;
int i;
......@@ -1839,6 +1842,18 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
/*
* Regions never span CXL root devices, so by definition the
* bridge for one device is the same for all.
*/
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
if (!cxl_nvb) {
cxlr_pmem = ERR_PTR(-ENODEV);
goto out;
}
cxlr->cxl_nvb = cxl_nvb;
}
m->cxlmd = cxlmd;
get_device(&cxlmd->dev);
m->start = cxled->dpa_res->start;
......@@ -1848,6 +1863,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
dev = &cxlr_pmem->dev;
cxlr_pmem->cxlr = cxlr;
cxlr->cxlr_pmem = cxlr_pmem;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
device_set_pm_not_required(dev);
......@@ -1860,9 +1876,36 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
return cxlr_pmem;
}
static void cxlr_pmem_unregister(void *dev)
static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
device_unregister(dev);
struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
struct cxl_region *cxlr = cxlr_pmem->cxlr;
struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
/*
* Either the bridge is in ->remove() context under the device_lock(),
* or cxlr_release_nvdimm() is cancelling the bridge's release action
* for @cxlr_pmem and doing it itself (while manually holding the bridge
* lock).
*/
device_lock_assert(&cxl_nvb->dev);
cxlr->cxlr_pmem = NULL;
cxlr_pmem->cxlr = NULL;
device_unregister(&cxlr_pmem->dev);
}
static void cxlr_release_nvdimm(void *_cxlr)
{
struct cxl_region *cxlr = _cxlr;
struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
device_lock(&cxl_nvb->dev);
if (cxlr->cxlr_pmem)
devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
cxlr->cxlr_pmem);
device_unlock(&cxl_nvb->dev);
cxlr->cxl_nvb = NULL;
put_device(&cxl_nvb->dev);
}
/**
......@@ -1874,12 +1917,14 @@ static void cxlr_pmem_unregister(void *dev)
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
struct cxl_pmem_region *cxlr_pmem;
struct cxl_nvdimm_bridge *cxl_nvb;
struct device *dev;
int rc;
cxlr_pmem = cxl_pmem_region_alloc(cxlr);
if (IS_ERR(cxlr_pmem))
return PTR_ERR(cxlr_pmem);
cxl_nvb = cxlr->cxl_nvb;
dev = &cxlr_pmem->dev;
rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
......@@ -1893,13 +1938,52 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev));
return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
device_lock(&cxl_nvb->dev);
if (cxl_nvb->dev.driver)
rc = devm_add_action_or_reset(&cxl_nvb->dev,
cxlr_pmem_unregister, cxlr_pmem);
else
rc = -ENXIO;
device_unlock(&cxl_nvb->dev);
if (rc)
goto err_bridge;
/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
err:
put_device(dev);
err_bridge:
put_device(&cxl_nvb->dev);
cxlr->cxl_nvb = NULL;
return rc;
}
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
return 0;
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
dev_warn(
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
return 0;
} else {
dev_err(&cxlr->dev,
"Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
return 0;
}
static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
......@@ -1915,14 +1999,21 @@ static int cxl_region_probe(struct device *dev)
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
rc = -ENXIO;
goto out;
}
rc = cxl_region_invalidate_memregion(cxlr);
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
out:
up_read(&cxl_region_rwsem);
if (rc)
return rc;
switch (cxlr->mode) {
case CXL_DECODER_PMEM:
return devm_cxl_add_pmem_region(cxlr);
......@@ -1950,4 +2041,5 @@ void cxl_region_exit(void)
}
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
......@@ -35,6 +35,8 @@
* @cdev: char dev core object for ioctl operations
* @cxlds: The device state backing this device
* @detach_work: active memdev lost a port in its ancestry
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
......@@ -42,6 +44,8 @@ struct cxl_memdev {
struct cdev cdev;
struct cxl_dev_state *cxlds;
struct work_struct detach_work;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
int id;
};
......@@ -76,6 +80,15 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
if (!port)
return NULL;
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
/**
* struct cxl_mbox_cmd - A command to be submitted to hardware.
* @opcode: (input) The command set and command submitted to hardware.
......@@ -88,6 +101,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
* outputs commands this is always expected to be deterministic. For
* variable sized output commands, it tells the exact number of bytes
* written.
* @min_out: (input) internal command output payload size validation
* @return_code: (output) Error code returned from hardware.
*
* This is the primary mechanism used to send commands to the hardware.
......@@ -102,6 +116,7 @@ struct cxl_mbox_cmd {
void *payload_out;
size_t size_in;
size_t size_out;
size_t min_out;
u16 return_code;
};
......@@ -186,8 +201,10 @@ struct cxl_endpoint_dvsec_info {
* Currently only memory devices are represented.
*
* @dev: The device associated with this CXL state
* @cxlmd: The device representing the CXL.mem capabilities of @dev
* @regs: Parsed register blocks
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
......@@ -218,10 +235,12 @@ struct cxl_endpoint_dvsec_info {
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_regs regs;
int cxl_dvsec;
bool rcd;
size_t payload_size;
size_t lsa_size;
struct mutex mbox_mutex; /* Protects device mailbox and firmware */
......@@ -273,6 +292,12 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500,
CXL_MBOX_OP_SET_PASSPHRASE = 0x4501,
CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502,
CXL_MBOX_OP_UNLOCK = 0x4503,
CXL_MBOX_OP_FREEZE_SECURITY = 0x4504,
CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE = 0x4505,
CXL_MBOX_OP_MAX = 0x10000
};
......@@ -372,8 +397,43 @@ struct cxl_mem_command {
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
size_t in_size, void *out, size_t out_size);
#define CXL_PMEM_SEC_STATE_USER_PASS_SET 0x01
#define CXL_PMEM_SEC_STATE_MASTER_PASS_SET 0x02
#define CXL_PMEM_SEC_STATE_LOCKED 0x04
#define CXL_PMEM_SEC_STATE_FROZEN 0x08
#define CXL_PMEM_SEC_STATE_USER_PLIMIT 0x10
#define CXL_PMEM_SEC_STATE_MASTER_PLIMIT 0x20
/* set passphrase input payload */
struct cxl_set_pass {
u8 type;
u8 reserved[31];
/* CXL field using NVDIMM define, same length */
u8 old_pass[NVDIMM_PASSPHRASE_LEN];
u8 new_pass[NVDIMM_PASSPHRASE_LEN];
} __packed;
/* disable passphrase input payload */
struct cxl_disable_pass {
u8 type;
u8 reserved[31];
u8 pass[NVDIMM_PASSPHRASE_LEN];
} __packed;
/* passphrase secure erase payload */
struct cxl_pass_erase {
u8 type;
u8 reserved[31];
u8 pass[NVDIMM_PASSPHRASE_LEN];
} __packed;
enum {
CXL_PMEM_SEC_PASS_MASTER = 0,
CXL_PMEM_SEC_PASS_USER,
};
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *cmd);
int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
......@@ -62,15 +62,6 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
struct cxl_register_map *map)
{
if (map->block_offset == U64_MAX)
return CXL_RESOURCE_NONE;
return pci_resource_start(pdev, map->barno) + map->block_offset;
}
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
......@@ -45,9 +45,60 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data)
return 0;
}
static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport)
{
struct cxl_port *parent_port = parent_dport->port;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
resource_size_t component_reg_phys;
int rc;
/*
* Now that the path to the root is established, record all the
* intervening ports in the chain.
*/
for (iter = parent_port, down = NULL; !is_cxl_root(iter);
down = iter, iter = to_cxl_port(iter->dev.parent)) {
struct cxl_ep *ep;
ep = cxl_ep_load(iter, cxlmd);
ep->next = down;
}
/*
* The component registers for an RCD might come from the
* host-bridge RCRB if they are not already mapped via the
* typical register locator mechanism.
*/
if (parent_dport->rch && cxlds->component_reg_phys == CXL_RESOURCE_NONE)
component_reg_phys = cxl_rcrb_to_component(
&cxlmd->dev, parent_dport->rcrb, CXL_RCRB_UPSTREAM);
else
component_reg_phys = cxlds->component_reg_phys;
endpoint = devm_cxl_add_port(host, &cxlmd->dev, component_reg_phys,
parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
rc = cxl_endpoint_autoremove(cxlmd, endpoint);
if (rc)
return rc;
if (!endpoint->dev.driver) {
dev_err(&cxlmd->dev, "%s failed probe\n",
dev_name(&endpoint->dev));
return -ENXIO;
}
return 0;
}
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *endpoint_parent;
struct cxl_port *parent_port;
struct cxl_dport *dport;
struct dentry *dentry;
......@@ -80,21 +131,34 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
if (dport->rch)
endpoint_parent = parent_port->uport;
else
endpoint_parent = &parent_port->dev;
device_lock(endpoint_parent);
if (!endpoint_parent->driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
dev_name(&parent_port->dev));
dev_name(endpoint_parent));
rc = -ENXIO;
goto unlock;
}
rc = devm_cxl_add_endpoint(cxlmd, dport);
rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
unlock:
device_unlock(&parent_port->dev);
device_unlock(endpoint_parent);
put_device(&parent_port->dev);
if (rc)
return rc;
if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(cxlmd);
if (rc == -ENODEV)
dev_info(dev, "PMEM disabled by platform\n");
else
return rc;
}
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
......@@ -9,10 +9,13 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#define CREATE_TRACE_POINTS
#include <trace/events/cxl.h>
/**
* DOC: cxl pci
......@@ -276,35 +279,22 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
void __iomem *addr;
int bar = map->barno;
struct device *dev = &pdev->dev;
resource_size_t offset = map->block_offset;
/* Basic sanity check that BAR is big enough */
if (pci_resource_len(pdev, bar) < offset) {
dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar,
&pdev->resource[bar], &offset);
return -ENXIO;
}
addr = pci_iomap(pdev, bar, 0);
if (!addr) {
map->base = ioremap(map->resource, map->max_size);
if (!map->base) {
dev_err(dev, "failed to map registers\n");
return -ENOMEM;
}
dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n",
bar, &offset);
map->base = addr + map->block_offset;
dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
return 0;
}
static void cxl_unmap_regblock(struct pci_dev *pdev,
struct cxl_register_map *map)
{
pci_iounmap(pdev, map->base - map->block_offset);
iounmap(map->base);
map->base = NULL;
}
......@@ -324,6 +314,9 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return -ENXIO;
}
if (!comp_map->ras.valid)
dev_dbg(dev, "RAS registers not found\n");
dev_dbg(dev, "Set up component registers\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
......@@ -347,27 +340,6 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return 0;
}
static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
struct device *dev = cxlds->dev;
struct pci_dev *pdev = to_pci_dev(dev);
switch (map->reg_type) {
case CXL_REGLOC_RBI_COMPONENT:
cxl_map_component_regs(pdev, &cxlds->regs.component, map);
dev_dbg(dev, "Mapping component registers...\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
dev_dbg(dev, "Probing device registers...\n");
break;
default:
break;
}
return 0;
}
static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
......@@ -418,6 +390,11 @@ static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
continue;
}
if (!pci_request_config_region_exclusive(pdev, off,
PCI_DOE_CAP_SIZEOF,
dev_name(dev)))
pci_err(pdev, "Failed to exclude DOE registers\n");
if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
dev_err(dev, "xa_insert failed to insert MB @ %x\n",
off);
......@@ -428,6 +405,20 @@ static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
}
}
/*
* Assume that any RCIEP that emits the CXL memory expander class code
* is an RCD
*/
static bool is_cxl_restricted(struct pci_dev *pdev)
{
return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
static void disable_aer(void *pdev)
{
pci_disable_pcie_error_reporting(pdev);
}
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
......@@ -449,7 +440,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds = cxl_dev_state_create(&pdev->dev);
if (IS_ERR(cxlds))
return PTR_ERR(cxlds);
pci_set_drvdata(pdev, cxlds);
cxlds->rcd = is_cxl_restricted(pdev);
cxlds->serial = pci_get_dsn(pdev);
cxlds->cxl_dvsec = pci_find_dvsec_capability(
pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
......@@ -461,7 +454,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
rc = cxl_map_regs(cxlds, &map);
rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
if (rc)
return rc;
......@@ -474,10 +467,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
cxlds->component_reg_phys = map.resource;
devm_cxl_pci_create_doe(cxlds);
rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
&map, BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
......@@ -498,8 +496,13 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
if (cxlds->regs.ras) {
pci_enable_pcie_error_reporting(pdev);
rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
if (rc)
return rc;
}
pci_save_state(pdev);
return rc;
}
......@@ -511,10 +514,151 @@ static const struct pci_device_id cxl_mem_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
void __iomem *addr;
u32 *log_addr;
int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
log_addr = log;
for (i = 0; i < log_u32_size; i++) {
*log_addr = readl(addr);
log_addr++;
addr += sizeof(u32);
}
}
/*
* Log the state of the RAS status registers and prepare them to log the
* next error status. Return true if a reset is needed.
*/
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
u32 hl[CXL_HEADERLOG_SIZE_U32];
void __iomem *addr;
u32 status;
u32 fe;
if (!cxlds->regs.ras)
return false;
addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
return false;
/* If multiple errors, log header points to first error from ctrl reg */
if (hweight32(status) > 1) {
addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, readl(addr)));
} else {
fe = status;
}
header_log_copy(cxlds, hl);
trace_cxl_aer_uncorrectable_error(dev, status, fe, hl);
writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
return true;
}
static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
bool ue;
/*
* A frozen channel indicates an impending reset which is fatal to
* CXL.mem operation, and will likely crash the system. On the off
* chance the situation is recoverable dump the status of the RAS
* capability registers and bounce the active state of the memdev.
*/
ue = cxl_report_and_clear(cxlds);
switch (state) {
case pci_channel_io_normal:
if (ue) {
device_release_driver(dev);
return PCI_ERS_RESULT_NEED_RESET;
}
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
dev_warn(&pdev->dev,
"%s: frozen state error detected, disable CXL.mem\n",
dev_name(dev));
device_release_driver(dev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(&pdev->dev,
"failure state error detected, request disconnect\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
dev_name(dev));
pci_restore_state(pdev);
if (device_attach(dev) <= 0)
return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_RECOVERED;
}
static void cxl_error_resume(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
dev->driver ? "successful" : "failed");
}
static void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
void __iomem *addr;
u32 status;
if (!cxlds->regs.ras)
return;
addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
trace_cxl_aer_correctable_error(dev, status);
}
}
static const struct pci_error_handlers cxl_error_handlers = {
.error_detected = cxl_error_detected,
.slot_reset = cxl_slot_reset,
.resume = cxl_error_resume,
.cor_error_detected = cxl_cor_error_detected,
};
static struct pci_driver cxl_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = cxl_mem_pci_tbl,
.probe = cxl_pci_probe,
.err_handler = &cxl_error_handlers,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"
static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
unsigned long security_flags = 0;
struct cxl_get_security_output {
__le32 flags;
} out;
struct cxl_mbox_cmd mbox_cmd;
u32 sec_out;
int rc;
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
.size_out = sizeof(out),
.payload_out = &out,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0)
return 0;
sec_out = le32_to_cpu(out.flags);
if (ptype == NVDIMM_MASTER) {
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
else
set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
return security_flags;
}
if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
else
set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
} else {
set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
}
return security_flags;
}
static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
const struct nvdimm_key_data *old_data,
const struct nvdimm_key_data *new_data,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_set_pass set_pass;
set_pass = (struct cxl_set_pass) {
.type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER,
};
memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_SET_PASSPHRASE,
.size_in = sizeof(set_pass),
.payload_in = &set_pass,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_disable_pass dis_pass;
struct cxl_mbox_cmd mbox_cmd;
dis_pass = (struct cxl_disable_pass) {
.type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER,
};
memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_DISABLE_PASSPHRASE,
.size_in = sizeof(dis_pass),
.payload_in = &dis_pass,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
}
static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
}
static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
u8 pass[NVDIMM_PASSPHRASE_LEN];
struct cxl_mbox_cmd mbox_cmd;
int rc;
memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_UNLOCK,
.size_in = NVDIMM_PASSPHRASE_LEN,
.payload_in = pass,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0)
return rc;
return 0;
}
static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_pass_erase erase;
int rc;
erase = (struct cxl_pass_erase) {
.type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER :
CXL_PMEM_SEC_PASS_USER,
};
memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
.size_in = sizeof(erase),
.payload_in = &erase,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
if (rc < 0)
return rc;
return 0;
}
static const struct nvdimm_security_ops __cxl_security_ops = {
.get_flags = cxl_pmem_get_security_flags,
.change_key = cxl_pmem_security_change_key,
.disable = cxl_pmem_security_disable,
.freeze = cxl_pmem_security_freeze,
.unlock = cxl_pmem_security_unlock,
.erase = cxl_pmem_security_passphrase_erase,
.disable_master = cxl_pmem_security_disable_master,
};
const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;
......@@ -114,4 +114,16 @@ config NVDIMM_TEST_BUILD
core devm_memremap_pages() implementation and other
infrastructure.
config NVDIMM_SECURITY_TEST
bool "Enable NVDIMM security unit tests"
depends on NVDIMM_KEYS
help
The NVDIMM and CXL subsystems support unit testing of their device
security state machines. The NVDIMM_SECURITY_TEST option disables CPU
cache maintenance operations around events like secure erase and
overwrite. Also, when enabled, the NVDIMM subsystem core helps the unit
test implement a mock state machine.
Select N if unsure.
endif
......@@ -349,11 +349,18 @@ static ssize_t available_slots_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_slots);
__weak ssize_t security_show(struct device *dev,
ssize_t security_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
/*
* For the test version we need to poll the "hardware" in order
* to get the updated status for unlock testing.
*/
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
......@@ -2,6 +2,7 @@
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
......@@ -100,6 +101,16 @@ static void nd_region_remove(struct device *dev)
*/
sysfs_put(nd_region->bb_state);
nd_region->bb_state = NULL;
/*
* Try to flush caches here since a disabled region may be subject to
* secure erase while disabled, and previous dirty data should not be
* written back to a new instance of the region. This only matters on
* bare metal where security commands are available, so silent failure
* here is ok.
*/
if (cpu_cache_has_invalidate_memregion())
cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}
static int child_notify(struct device *dev, void *data)
......@@ -59,9 +59,51 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
return 0;
}
static int nd_region_invalidate_memregion(struct nd_region *nd_region)
{
int i, incoherent = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
incoherent++;
break;
}
}
if (!incoherent)
return 0;
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
dev_warn(
&nd_region->dev,
"Bypassing cpu_cache_invalidate_memergion() for testing!\n");
goto out;
} else {
dev_err(&nd_region->dev,
"Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
out:
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
clear_bit(NDD_INCOHERENT, &nvdimm->flags);
}
return 0;
}
int nd_region_activate(struct nd_region *nd_region)
{
int i, j, num_flush = 0;
int i, j, rc, num_flush = 0;
struct nd_region_data *ndrd;
struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
......@@ -85,6 +127,10 @@ int nd_region_activate(struct nd_region *nd_region)
}
nvdimm_bus_unlock(&nd_region->dev);
rc = nd_region_invalidate_memregion(nd_region);
if (rc)
return rc;
ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
if (!ndrd)
return -ENOMEM;
......@@ -1222,3 +1268,5 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
MODULE_IMPORT_NS(DEVMEM);
......@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
|| !nvdimm->sec.flags)
return -EIO;
/* cxl_test needs this to pre-populate the security state */
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/* No need to go further if security is disabled */
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return 0;
......@@ -204,6 +208,8 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
......@@ -239,7 +245,8 @@ static int check_security_state(struct nvdimm *nvdimm)
return 0;
}
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid,
enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
......@@ -250,8 +257,13 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
|| !nvdimm->sec.flags)
if (!nvdimm->sec.ops || !nvdimm->sec.flags)
return -EOPNOTSUPP;
if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable)
return -EOPNOTSUPP;
if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
......@@ -263,11 +275,20 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
if (!data)
return -ENOKEY;
if (pass_type == NVDIMM_MASTER) {
rc = nvdimm->sec.ops->disable_master(nvdimm, data);
dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
} else {
rc = nvdimm->sec.ops->disable(nvdimm, data);
dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
}
nvdimm_put_key(key);
if (pass_type == NVDIMM_MASTER)
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
else
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
......@@ -355,6 +376,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return -ENOKEY;
rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
......@@ -389,6 +412,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -ENOKEY;
rc = nvdimm->sec.ops->overwrite(nvdimm, data);
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
......@@ -473,6 +498,7 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
#define OPS \
C( OP_FREEZE, "freeze", 1), \
C( OP_DISABLE, "disable", 2), \
C( OP_DISABLE_MASTER, "disable_master", 2), \
C( OP_UPDATE, "update", 3), \
C( OP_ERASE, "erase", 2), \
C( OP_OVERWRITE, "overwrite", 2), \
......@@ -524,7 +550,10 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
rc = nvdimm_security_freeze(nvdimm);
} else if (i == OP_DISABLE) {
dev_dbg(dev, "disable %u\n", key);
rc = security_disable(nvdimm, key);
rc = security_disable(nvdimm, key, NVDIMM_USER);
} else if (i == OP_DISABLE_MASTER) {
dev_dbg(dev, "disable_master %u\n", key);
rc = security_disable(nvdimm, key, NVDIMM_MASTER);
} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
......
......@@ -756,6 +756,13 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
count)) {
pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
current->comm, off);
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
}
if (off > dev->cfg_size)
return 0;
if (off + count > dev->cfg_size) {
......
......@@ -961,8 +961,14 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
if (aer)
pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
if (pcie_aer_is_native(dev))
if (pcie_aer_is_native(dev)) {
struct pci_driver *pdrv = dev->driver;
if (pdrv && pdrv->err_handler &&
pdrv->err_handler->cor_error_detected)
pdrv->err_handler->cor_error_detected(dev);
pcie_clear_device_status(dev);
}
} else if (info->severity == AER_NONFATAL)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
......
......@@ -2305,6 +2305,12 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
INIT_LIST_HEAD(&dev->bus_list);
dev->dev.type = &pci_dev_type;
dev->bus = pci_bus_get(bus);
dev->driver_exclusive_resource = (struct resource) {
.name = "PCI Exclusive",
.start = 0,
.end = -1,
};
#ifdef CONFIG_PCI_MSI
raw_spin_lock_init(&dev->msi_lock);
#endif
......
......@@ -318,6 +318,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);
extern bool resource_is_exclusive(struct resource *resource, u64 addr,
resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
......
......@@ -35,6 +35,11 @@ enum {
NDD_WORK_PENDING = 4,
/* dimm supports namespace labels */
NDD_LABELING = 6,
/*
* dimm contents have changed requiring invalidation of CPU caches prior
* to activation of a region that includes this device
*/
NDD_INCOHERENT = 7,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
......@@ -183,6 +188,8 @@ struct nvdimm_security_ops {
int (*overwrite)(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data);
int (*query_overwrite)(struct nvdimm *nvdimm);
int (*disable_master)(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data);
};
enum nvdimm_fwa_state {
......
......@@ -3,6 +3,7 @@
#define _MEMREGION_H_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
struct memregion_info {
int target_node;
......@@ -20,4 +21,41 @@ static inline void memregion_free(int id)
{
}
#endif
/**
* cpu_cache_invalidate_memregion - drop any CPU cached data for
* memregions described by @res_desc
* @res_desc: one of the IORES_DESC_* types
*
* Perform cache maintenance after a memory event / operation that
* changes the contents of physical memory in a cache-incoherent manner.
* For example, device memory technologies like NVDIMM and CXL have
* device secure erase, and dynamic region provision that can replace
* the memory mapped to a given physical address.
*
* Limit the functionality to architectures that have an efficient way
* to writeback and invalidate potentially terabytes of address space at
* once. Note that this routine may or may not write back any dirty
* contents while performing the invalidation. It is only exported for
* the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
* symbol namespace on bare platforms.
*
* Returns 0 on success or negative error code on a failure to perform
* the cache maintenance.
*/
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
int cpu_cache_invalidate_memregion(int res_desc);
bool cpu_cache_has_invalidate_memregion(void);
#else
static inline bool cpu_cache_has_invalidate_memregion(void)
{
return false;
}
static inline int cpu_cache_invalidate_memregion(int res_desc)
{
WARN_ON_ONCE("CPU cache invalidation required");
return -ENXIO;
}
#endif
#endif /* _MEMREGION_H_ */
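The kernel-doc above spells out the intended call pattern: probe for support with cpu_cache_has_invalidate_memregion() before invalidating by IORES_DESC_* type. A minimal sketch of a caller, modeled on the nd_region usage earlier in this diff (the helper name pmem_example_reset() is hypothetical and not part of this series):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/memregion.h>

/*
 * Hypothetical helper: ensure no stale CPU cache lines survive a secure
 * erase / unlock before the memory range is handed back out. Mirrors the
 * nd_region_invalidate_memregion() pattern added in this series.
 */
static int pmem_example_reset(struct device *dev)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		/* Guest VMs and unsupported architectures land here */
		dev_err(dev, "CPU cache invalidation not supported\n");
		return -ENXIO;
	}

	return cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}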
......@@ -410,6 +410,7 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
bool match_driver; /* Skip attaching driver */
......@@ -844,6 +845,9 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
/* Allow device driver to record more details of a correctable error */
void (*cor_error_detected)(struct pci_dev *dev);
};
......@@ -1408,6 +1412,21 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
unsigned int len, const char *name)
{
return __request_region(&pdev->driver_exclusive_resource, offset, len,
name, IORESOURCE_EXCLUSIVE);
}
static inline void pci_release_config_region(struct pci_dev *pdev,
unsigned int offset,
unsigned int len)
{
__release_region(&pdev->driver_exclusive_resource, offset, len);
}
/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
......@@ -2505,6 +2524,7 @@ void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
......
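As a usage note for the pci_request_config_region_exclusive() helper added above: a driver claims the config-space window it owns, and subsequent userspace writes to that window through sysfs then hit the resource_is_exclusive() check and the pci_warn_once()/taint path in pci_write_config(). A hedged sketch, with an illustrative capability offset and minimal error handling:

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical: claim a mailbox register block so userspace pokes warn */
static int example_claim_config_region(struct pci_dev *pdev, u16 cap_off)
{
	struct resource *res;

	/* PCI_DOE_CAP_SIZEOF is the DOE register-block size added in this diff */
	res = pci_request_config_region_exclusive(pdev, cap_off,
						   PCI_DOE_CAP_SIZEOF,
						   "example mailbox");
	if (!res)
		return -EBUSY;

	return 0;
}

The matching pci_release_config_region() call drops the claim on teardown.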
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cxl
#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _CXL_EVENTS_H
#include <linux/tracepoint.h>
#define CXL_HEADERLOG_SIZE SZ_512
#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32)
#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
#define CXL_RAS_UC_REINIT_THRESH BIT(8)
#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
#define CXL_RAS_UC_POISON BIT(10)
#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
#define show_uc_errs(status) __print_flags(status, " | ", \
{ CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
{ CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
{ CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
{ CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
{ CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
{ CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
{ CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
{ CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
{ CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
{ CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
{ CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
{ CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
{ CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
{ CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)
TRACE_EVENT(cxl_aer_uncorrectable_error,
TP_PROTO(const struct device *dev, u32 status, u32 fe, u32 *hl),
TP_ARGS(dev, status, fe, hl),
TP_STRUCT__entry(
__string(dev_name, dev_name(dev))
__field(u32, status)
__field(u32, first_error)
__array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
),
TP_fast_assign(
__assign_str(dev_name, dev_name(dev));
__entry->status = status;
__entry->first_error = fe;
/*
* Embed the 512B headerlog data for user app retrieval and
* parsing, but no need to print this in the trace buffer.
*/
memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
),
TP_printk("%s: status: '%s' first_error: '%s'",
__get_str(dev_name),
show_uc_errs(__entry->status),
show_uc_errs(__entry->first_error)
)
);
#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
#define CXL_RAS_CE_CRC_THRESH BIT(2)
#define CXL_RAS_CE_RETRY_THRESH BIT(3)
#define CXL_RAS_CE_CACHE_POISON BIT(4)
#define CXL_RAS_CE_MEM_POISON BIT(5)
#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
#define show_ce_errs(status) __print_flags(status, " | ", \
{ CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
{ CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
{ CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
{ CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
{ CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
{ CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)
TRACE_EVENT(cxl_aer_correctable_error,
TP_PROTO(const struct device *dev, u32 status),
TP_ARGS(dev, status),
TP_STRUCT__entry(
__string(dev_name, dev_name(dev))
__field(u32, status)
),
TP_fast_assign(
__assign_str(dev_name, dev_name(dev));
__entry->status = status;
),
TP_printk("%s: status: '%s'",
__get_str(dev_name), show_ce_errs(__entry->status)
)
);
#endif /* _CXL_EVENTS_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cxl
#include <trace/define_trace.h>
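For reference, callers of the trace points defined above pass the raw RAS status values plus the 512-byte header log buffer. A hedged sketch of the reporting side (the surrounding register reads and the exact include path for the generated trace header are assumptions, not shown in this hunk):

#include <linux/device.h>
#include <linux/types.h>
#include "cxl.h"	/* the trace header above; generates trace_cxl_aer_*() */

/* Hypothetical: forward already-read RAS state to the trace points */
static void example_report_ras(struct device *dev, u32 uc_status, u32 first_err,
			       u32 *header_log, u32 ce_status)
{
	/* header_log must point at CXL_HEADERLOG_SIZE (512) bytes */
	if (uc_status)
		trace_cxl_aer_uncorrectable_error(dev, uc_status, first_err,
						  header_log);
	if (ce_status)
		trace_cxl_aer_correctable_error(dev, ce_status);
}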
......@@ -1119,6 +1119,7 @@
#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */
/* DOE Data Object - note not actually registers */
#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
......
......@@ -1707,18 +1707,15 @@ static int strict_iomem_checks;
*
* Returns true if exclusive to the kernel, otherwise returns false.
*/
bool iomem_is_exclusive(u64 addr)
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
IORESOURCE_EXCLUSIVE;
bool skip_children = false, err = false;
int size = PAGE_SIZE;
struct resource *p;
addr = addr & PAGE_MASK;
read_lock(&resource_lock);
for_each_resource(&iomem_resource, p, skip_children) {
for_each_resource(root, p, skip_children) {
if (p->start >= addr + size)
break;
if (p->end < addr) {
......@@ -1757,6 +1754,12 @@ bool iomem_is_exclusive(u64 addr)
return err;
}
bool iomem_is_exclusive(u64 addr)
{
return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
PAGE_SIZE);
}
struct resource_entry *resource_list_create_entry(struct resource *res,
size_t extra_size)
{
......
......@@ -672,6 +672,9 @@ config ARCH_HAS_PMEM_API
config MEMREGION
bool
config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
bool
config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
bool
......
......@@ -10,6 +10,7 @@ ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
ldflags-y += --wrap=cxl_hdm_decode_init
ldflags-y += --wrap=cxl_rcrb_to_component
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
......@@ -26,6 +27,7 @@ cxl_acpi-y += config_check.o
obj-m += cxl_pmem.o
cxl_pmem-y := $(CXL_SRC)/pmem.o
cxl_pmem-y += $(CXL_SRC)/security.o
cxl_pmem-y += config_check.o
obj-m += cxl_port.o
......
......@@ -10,4 +10,6 @@ void check(void)
BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS));
BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI));
BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM));
BUILD_BUG_ON(!IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST));
BUILD_BUG_ON(!IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST));
}
......@@ -224,6 +224,25 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL);
resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev,
resource_size_t rcrb,
enum cxl_rcrb which)
{
int index;
resource_size_t component_reg_phys;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(dev))
component_reg_phys =
ops->cxl_rcrb_to_component(dev, rcrb, which);
else
component_reg_phys = cxl_rcrb_to_component(dev, rcrb, which);
put_cxl_mock_ops(index);
return component_reg_phys;
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcrb_to_component, CXL);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
......@@ -15,6 +15,9 @@ struct cxl_mock_ops {
acpi_string pathname,
struct acpi_object_list *arguments,
unsigned long long *data);
resource_size_t (*cxl_rcrb_to_component)(struct device *dev,
resource_size_t rcrb,
enum cxl_rcrb which);
struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
bool (*is_mock_bus)(struct pci_bus *bus);
bool (*is_mock_port)(struct device *dev);
......
......@@ -79,7 +79,6 @@ libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
libnvdimm-y += dimm_devs.o
libnvdimm-y += libnvdimm_test.o
libnvdimm-y += config_check.o
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright Intel Corp. 2018 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"
ssize_t security_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
/*
* For the test version we need to poll the "hardware" in order
* to get the updated status for unlock testing.
*/
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
return -ENOTTY;
}