Commit aeaefabc authored by Dan Williams

Merge branch 'for-6.5/cxl-type-2' into for-6.5/cxl

Pick up the driver cleanups identified in preparation for CXL "type-2"
(accelerator) device support. The major change here, from a conflict
generation perspective, is the split of 'struct cxl_memdev_state' from
the core 'struct cxl_dev_state', since an accelerator may not care
about all the optional features that are standard on a CXL "type-3"
(host-only memory expander) device.

A silent conflict also arises from moving the endpoint port to be a
formal property of a 'struct cxl_memdev' rather than stashing it in
drvdata.
Parents: 867eab65 8f0220af
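
In outline, the split and the new back-pointer look like the sketch
below. This is a condensed, illustrative view of the structures as they
appear in the diff that follows (most fields elided), not a complete
definition:

    /* Core state, common to type-2 (accelerator) and type-3 devices */
    struct cxl_dev_state {
            struct device *dev;
            struct cxl_memdev *cxlmd;
            enum cxl_devtype type; /* CXL_DEVTYPE_DEVMEM or CXL_DEVTYPE_CLASSMEM */
            /* ... dpa/pmem/ram resources, registers, serial ... */
    };

    /* Memory-device-class (type-3) state wraps the core state */
    struct cxl_memdev_state {
            struct cxl_dev_state cxlds;
            /* ... mailbox, partition, event, poison, security state ... */
    };

    /* Accelerators never carry memdev state, so the cast is guarded */
    static inline struct cxl_memdev_state *
    to_cxl_memdev_state(struct cxl_dev_state *cxlds)
    {
            if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
                    return NULL;
            return container_of(cxlds, struct cxl_memdev_state, cxlds);
    }

    /* The endpoint port is now a formal memdev property */
    struct cxl_memdev {
            /* ... */
            struct cxl_port *endpoint; /* previously dev_get_drvdata() */
            /* ... */
    };

With this shape, mailbox-centric helpers move from taking a 'struct
cxl_dev_state *' to a 'struct cxl_memdev_state *' (see the changes to
cxl_internal_send_cmd() and friends below), while accelerator drivers
only ever deal in the core type.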
......@@ -258,7 +258,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
.start = res->start,
.end = res->end,
......
......@@ -570,8 +570,9 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
CXL_HDM_DECODER0_CTRL_TYPE);
u32p_replace_bits(ctrl,
!!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
CXL_HDM_DECODER0_CTRL_HOSTONLY);
}
static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
......@@ -764,7 +765,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
if (!len)
return -ENOENT;
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = NULL;
cxld->reset = NULL;
cxld->hpa_range = info->dvsec_range[which];
......@@ -793,8 +794,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
u64 size, base, skip, dpa_size, lo, hi;
struct cxl_endpoint_decoder *cxled;
bool committed;
u32 remainder;
int i, rc;
......@@ -827,6 +828,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
if (info)
cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
......@@ -837,10 +840,10 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
cxld->target_type = CXL_DECODER_EXPANDER;
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_ACCELERATOR;
cxld->target_type = CXL_DECODER_DEVMEM;
if (cxld->id != port->commit_end + 1) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
......@@ -856,12 +859,28 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
port->commit_end = cxld->id;
} else {
/* unless / until type-2 drivers arrive, assume type-3 */
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
if (cxled) {
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
/*
* Default by devtype until a device arrives that needs
* more precision.
*/
if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
cxld->target_type = CXL_DECODER_DEVMEM;
} else {
/* To be overridden by region type at commit time */
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
}
if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
}
cxld->target_type = CXL_DECODER_EXPANDER;
}
rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
&cxld->interleave_ways);
......@@ -880,7 +899,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
cxld->interleave_ways, cxld->interleave_granularity);
if (!info) {
if (!cxled) {
lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
......@@ -903,7 +922,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
skip = (hi << 32) + lo;
cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
......
[two collapsed file diffs omitted]
......@@ -308,36 +308,17 @@ static void disable_hdm(void *_cxlhdm)
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
void __iomem *hdm;
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
u32 global_ctrl;
/*
* If the hdm capability was not mapped there is nothing to enable and
* the caller is responsible for what happens next. For example,
* emulate a passthrough decoder.
*/
if (IS_ERR(cxlhdm))
return 0;
hdm = cxlhdm->regs.hdm_decoder;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
/*
* If the HDM decoder capability was enabled on entry, skip
* registering disable_hdm() since this decode capability may be
* owned by platform firmware.
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE)
return 0;
writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
int cxl_dvsec_rr_decode(struct device *dev, int d,
struct cxl_endpoint_dvsec_info *info)
......@@ -511,7 +492,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
if (info->mem_enabled)
return 0;
rc = devm_cxl_enable_hdm(port, cxlhdm);
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
return rc;
......
......@@ -64,7 +64,7 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
struct device *dev;
if (!port)
......
......@@ -117,9 +117,9 @@ static ssize_t target_type_show(struct device *dev,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
switch (cxld->target_type) {
case CXL_DECODER_ACCELERATOR:
case CXL_DECODER_DEVMEM:
return sysfs_emit(buf, "accelerator\n");
case CXL_DECODER_EXPANDER:
case CXL_DECODER_HOSTONLYMEM:
return sysfs_emit(buf, "expander\n");
}
return -ENXIO;
......@@ -1161,7 +1161,7 @@ static struct device *grandparent(struct device *dev)
static void delete_endpoint(void *data)
{
struct cxl_memdev *cxlmd = data;
struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
struct cxl_port *endpoint = cxlmd->endpoint;
struct cxl_port *parent_port;
struct device *parent;
......@@ -1176,6 +1176,7 @@ static void delete_endpoint(void *data)
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
cxlmd->endpoint = NULL;
device_unlock(parent);
put_device(parent);
out:
......@@ -1187,7 +1188,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
struct device *dev = &cxlmd->dev;
get_device(&endpoint->dev);
dev_set_drvdata(dev, endpoint);
cxlmd->endpoint = endpoint;
cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
......@@ -1550,7 +1551,7 @@ static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
/* Pre initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->hpa_range = (struct range) {
.start = 0,
.end = -1,
......
......@@ -809,6 +809,18 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
return -EBUSY;
}
/*
* Endpoints should already match the region type, but backstop that
* assumption with an assertion. Switch-decoders change mapping-type
* based on what is mapped when they are assigned to a region.
*/
dev_WARN_ONCE(&cxlr->dev,
port == cxled_to_port(cxled) &&
cxld->target_type != cxlr->type,
"%s:%s mismatch decoder type %d -> %d\n",
dev_name(&cxled_to_memdev(cxled)->dev),
dev_name(&cxld->dev), cxld->target_type, cxlr->type);
cxld->target_type = cxlr->type;
cxl_rr->decoder = cxld;
return 0;
}
......@@ -2103,7 +2115,7 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
return ERR_PTR(-EBUSY);
}
return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_EXPANDER);
return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
static ssize_t create_pmem_region_store(struct device *dev,
......
......@@ -200,10 +200,10 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
}
int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
struct cxl_register_map *map, unsigned long map_mask)
const struct cxl_register_map *map, unsigned long map_mask)
{
struct mapinfo {
struct cxl_reg_map *rmap;
const struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->component_map.hdm_decoder, &regs->hdm_decoder },
......@@ -233,11 +233,11 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(struct device *dev,
struct cxl_device_regs *regs,
struct cxl_register_map *map)
const struct cxl_register_map *map)
{
resource_size_t phys_addr = map->resource;
struct mapinfo {
struct cxl_reg_map *rmap;
const struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->device_map.status, &regs->status, },
......
......@@ -56,7 +56,7 @@
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
......@@ -262,10 +262,10 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
struct cxl_device_reg_map *map);
int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
struct cxl_register_map *map,
const struct cxl_register_map *map,
unsigned long map_mask);
int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs,
struct cxl_register_map *map);
const struct cxl_register_map *map);
enum cxl_regloc_type;
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
......@@ -298,8 +298,8 @@ resource_size_t cxl_rcrb_to_component(struct device *dev,
#define CXL_DECODER_F_MASK GENMASK(5, 0)
enum cxl_decoder_type {
CXL_DECODER_ACCELERATOR = 2,
CXL_DECODER_EXPANDER = 3,
CXL_DECODER_DEVMEM = 2,
CXL_DECODER_HOSTONLYMEM = 3,
};
/*
......@@ -718,7 +718,6 @@ struct cxl_endpoint_dvsec_info {
struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
......
......@@ -39,6 +39,7 @@
* @detach_work: active memdev lost a port in its ancestry
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
*/
......@@ -49,6 +50,7 @@ struct cxl_memdev {
struct work_struct detach_work;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct cxl_port *endpoint;
int id;
int depth;
};
......@@ -83,7 +85,8 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
}
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds);
struct cxl_memdev_state;
int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
......@@ -202,7 +205,7 @@ static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd)
*/
#define CXL_CAPACITY_MULTIPLIER SZ_256M
/**
/*
* Event Interrupt Policy
*
* CXL rev 3.0 section 8.2.9.2.4; Table 8-52
......@@ -222,8 +225,8 @@ struct cxl_event_interrupt_policy {
/**
* struct cxl_event_state - Event log driver state
*
* @event_buf: Buffer to receive event data
* @event_log_lock: Serialize event_buf and log use
* @buf: Buffer to receive event data
* @log_lock: Serialize event_buf and log use
*/
struct cxl_event_state {
struct cxl_get_event_payload *buf;
......@@ -356,6 +359,20 @@ struct cxl_security_state {
struct kernfs_node *sanitize_node;
};
/*
* enum cxl_devtype - delineate type-2 from a generic type-3 device
* @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or
* HDM-DB, no requirement that this device implements a
* mailbox, or other memory-device-standard manageability
* flows.
* @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with
* HDM-H and class-mandatory memory device registers
*/
enum cxl_devtype {
CXL_DEVTYPE_DEVMEM,
CXL_DEVTYPE_CLASSMEM,
};
/**
* struct cxl_dev_state - The driver device state
*
......@@ -369,6 +386,36 @@ struct cxl_security_state {
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
* @dpa_res: Overall DPA resource tree for the device
* @pmem_res: Active Persistent memory capacity configuration
* @ram_res: Active Volatile memory capacity configuration
* @component_reg_phys: register base of component registers
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_regs regs;
int cxl_dvsec;
bool rcd;
bool media_ready;
struct resource dpa_res;
struct resource pmem_res;
struct resource ram_res;
resource_size_t component_reg_phys;
u64 serial;
enum cxl_devtype type;
};
/**
* struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
*
* CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines
* common memory device functionality like the presence of a mailbox and
* the functionality related to that like Identify Memory Device and Get
* Partition Info
* @cxlds: Core driver state common across Type-2 and Type-3 devices
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
......@@ -377,9 +424,6 @@ struct cxl_security_state {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
* @dpa_res: Overall DPA resource tree for the device
* @pmem_res: Active Persistent memory capacity configuration
* @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
......@@ -388,58 +432,48 @@ struct cxl_security_state {
* @active_persistent_bytes: sum of hard + soft persistent
* @next_volatile_bytes: volatile capacity change pending device reset
* @next_persistent_bytes: persistent capacity change pending device reset
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
* @event: event log driver state
* @poison: poison driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_regs regs;
int cxl_dvsec;
bool rcd;
bool media_ready;
struct cxl_memdev_state {
struct cxl_dev_state cxlds;
size_t payload_size;
size_t lsa_size;
struct mutex mbox_mutex; /* Protects device mailbox and firmware */
char firmware_version[0x10];
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
struct resource dpa_res;
struct resource pmem_res;
struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
u64 partition_align_bytes;
u64 active_volatile_bytes;
u64 active_persistent_bytes;
u64 next_volatile_bytes;
u64 next_persistent_bytes;
resource_size_t component_reg_phys;
u64 serial;
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
struct rcuwait mbox_wait;
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
int (*mbox_send)(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *cmd);
};
static inline struct cxl_memdev_state *
to_cxl_memdev_state(struct cxl_dev_state *cxlds)
{
if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
return NULL;
return container_of(cxlds, struct cxl_memdev_state, cxlds);
}
enum cxl_opcode {
CXL_MBOX_OP_INVALID = 0x0000,
CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
......@@ -801,18 +835,20 @@ enum {
CXL_PMEM_SEC_PASS_USER,
};
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
struct cxl_mbox_cmd *cmd);
int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status);
int cxl_set_timestamp(struct cxl_dev_state *cxlds);
int cxl_poison_state_init(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr);
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
......@@ -831,7 +867,7 @@ static inline void cxl_mem_active_dec(void)
}
#endif
int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd);
int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
struct cxl_hdm {
struct cxl_component_regs regs;
......
......@@ -117,6 +117,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *endpoint_parent;
struct cxl_port *parent_port;
......@@ -141,10 +142,10 @@ static int cxl_mem_probe(struct device *dev)
dentry = cxl_debugfs_create_dir(dev_name(dev));
debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds))
if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
&cxl_poison_inject_fops);
if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds))
if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
&cxl_poison_clear_fops);
......@@ -227,9 +228,12 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
if (a == &dev_attr_trigger_poison_list.attr) {
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_memdev_state *mds =
to_cxl_memdev_state(cxlmd->cxlds);
if (!test_bit(CXL_POISON_ENABLED_LIST,
to_cxl_memdev(dev)->cxlds->poison.enabled_cmds))
mds->poison.enabled_cmds))
return 0;
}
return a->mode;
......
[collapsed file diff omitted]
......@@ -15,9 +15,9 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *cxlds)
static void clear_exclusive(void *mds)
{
clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
clear_exclusive_cxl_commands(mds, exclusive_cmds);
}
static void unregister_nvdimm(void *nvdimm)
......@@ -65,13 +65,13 @@ static int cxl_nvdimm_probe(struct device *dev)
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long flags = 0, cmd_mask = 0;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct nvdimm *nvdimm;
int rc;
set_exclusive_cxl_commands(cxlds, exclusive_cmds);
rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
set_exclusive_cxl_commands(mds, exclusive_cmds);
rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
if (rc)
return rc;
......@@ -100,22 +100,23 @@ static struct cxl_driver cxl_nvdimm_driver = {
},
};
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
if (sizeof(*cmd) > buf_len)
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size) {
.config_size = cxlds->lsa_size,
.max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
*cmd = (struct nd_cmd_get_config_size){
.config_size = mds->lsa_size,
.max_xfer =
mds->payload_size - sizeof(struct cxl_mbox_set_lsa),
};
return 0;
}
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
......@@ -140,13 +141,13 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
.payload_out = cmd->out_buf,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
cmd->status = 0;
return rc;
}
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
......@@ -176,7 +177,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
.size_in = struct_size(set_lsa, data, cmd->in_length),
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
/*
* Set "firmware" status (4-packed bytes at the end of the input
......@@ -194,18 +195,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
return cxl_pmem_get_config_size(cxlds, buf, buf_len);
return cxl_pmem_get_config_size(mds, buf, buf_len);
case ND_CMD_GET_CONFIG_DATA:
return cxl_pmem_get_config_data(cxlds, buf, buf_len);
return cxl_pmem_get_config_data(mds, buf, buf_len);
case ND_CMD_SET_CONFIG_DATA:
return cxl_pmem_set_config_data(cxlds, buf, buf_len);
return cxl_pmem_set_config_data(mds, buf, buf_len);
default:
return -ENOTTY;
}
......
......@@ -60,17 +60,13 @@ static int discover_region(struct device *dev, void *root)
static int cxl_switch_port_probe(struct cxl_port *port)
{
struct cxl_hdm *cxlhdm;
int rc, nr_dports;
nr_dports = devm_cxl_port_enumerate_dports(port);
if (nr_dports < 0)
return nr_dports;
int rc;
cxlhdm = devm_cxl_setup_hdm(port, NULL);
rc = devm_cxl_enable_hdm(port, cxlhdm);
if (rc)
rc = devm_cxl_port_enumerate_dports(port);
if (rc < 0)
return rc;
cxlhdm = devm_cxl_setup_hdm(port, NULL);
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
......@@ -79,7 +75,7 @@ static int cxl_switch_port_probe(struct cxl_port *port)
return PTR_ERR(cxlhdm);
}
if (nr_dports == 1) {
if (rc == 1) {
dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
return devm_cxl_add_passthrough_decoder(port);
}
......
......@@ -14,7 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long security_flags = 0;
struct cxl_get_security_output {
__le32 flags;
......@@ -29,13 +29,13 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
.payload_out = &out,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return 0;
sec_out = le32_to_cpu(out.flags);
/* cache security state */
cxlds->security.state = sec_out;
mds->security.state = sec_out;
if (ptype == NVDIMM_MASTER) {
if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
......@@ -70,7 +70,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_set_pass set_pass;
......@@ -87,7 +87,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
.payload_in = &set_pass,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
......@@ -96,7 +96,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_disable_pass dis_pass;
struct cxl_mbox_cmd mbox_cmd;
......@@ -112,7 +112,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
.payload_in = &dis_pass,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
......@@ -131,12 +131,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
};
return cxl_internal_send_cmd(cxlds, &mbox_cmd);
return cxl_internal_send_cmd(mds, &mbox_cmd);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
......@@ -144,7 +144,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
u8 pass[NVDIMM_PASSPHRASE_LEN];
struct cxl_mbox_cmd mbox_cmd;
int rc;
......@@ -156,7 +156,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
.payload_in = pass,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return rc;
......@@ -169,7 +169,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_pass_erase erase;
int rc;
......@@ -185,7 +185,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
.payload_in = &erase,
};
rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc < 0)
return rc;
......
......@@ -6,7 +6,6 @@ ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=devm_cxl_setup_hdm
ldflags-y += --wrap=devm_cxl_enable_hdm
ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
......
......@@ -713,7 +713,7 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 1;
cxld->interleave_granularity = 256;
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
}
......@@ -787,7 +787,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 2;
eig_to_granularity(window->granularity, &cxld->interleave_granularity);
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
cxled->state = CXL_DECODER_STATE_AUTO;
port->commit_end = cxld->id;
......@@ -820,7 +820,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
} else
cxlsd->target[0] = dport;
cxld = &cxlsd->cxld;
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
iter->commit_end = 0;
/*
......
[collapsed file diff omitted]
......@@ -149,21 +149,6 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL);
int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
{
int index, rc;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport))
rc = 0;
else
rc = devm_cxl_enable_hdm(port, cxlhdm);
put_cxl_mock_ops(index);
return rc;
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL);
int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
int rc, index;
......