Commit 733b57f2 authored by Robert Richter, committed by Dan Williams

cxl/pci: Early setup RCH dport component registers from RCRB

CXL RAS capabilities must be enabled and accessible as soon as the CXL
endpoint is detected in the PCI hierarchy and bound to the cxl_pci
driver. This needs to be independent of other modules such as cxl_port
or cxl_mem.

CXL RAS capabilities reside in the Component Registers. For an RCH, their
location is determined by probing the RCRB, which currently happens very
late, only once the CXL Memory Device has been created.

Change this by moving the RCRB probe to the cxl_pci driver. Introduce a
new function cxl_pci_find_port(), analogous to cxl_mem_find_port(), to
determine the involved dport from the endpoint's PCI handle. Plug this
into the existing cxl_pci_setup_regs() function to set up the Component
Registers, and probe the RCRB in case the Component Registers cannot be
located through the CXL Register Locator capability.
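In outline, the updated cxl_pci_setup_regs() tries the Register Locator
DVSEC first and only falls back to the RCRB lookup for a Restricted CXL
Device; a condensed sketch of the flow this patch adds (the complete
version is in the diff below):

static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			      struct cxl_register_map *map)
{
	int rc;

	/* Preferred path: Register Locator DVSEC */
	rc = cxl_find_regblock(pdev, type, map);

	/* RCH fallback: derive the Component Registers from the RCRB */
	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
		rc = cxl_rcrb_get_comp_regs(pdev, map);

	if (rc)
		return rc;

	return cxl_setup_regs(map);
}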

This unifies the code and sets up the Component Registers early for both
VH and RCH mode, with only the cxl_pci driver involved. It also allows an
early mapping of the CXL RAS capability registers.
Signed-off-by: Robert Richter <rrichter@amd.com>
Signed-off-by: Terry Bowman <terry.bowman@amd.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/20230622205523.85375-14-terry.bowman@amd.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 86917c56
@@ -1480,6 +1480,13 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
 
+struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
+				   struct cxl_dport **dport)
+{
+	return find_cxl_port(pdev->dev.parent, dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
+
 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
 				   struct cxl_dport **dport)
 {
@@ -664,6 +664,8 @@ struct cxl_port *find_cxl_root(struct cxl_port *port);
 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
 void cxl_bus_rescan(void);
 void cxl_bus_drain(void);
+struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
+				   struct cxl_dport **dport);
 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport);
 bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
@@ -65,15 +65,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
 		ep->next = down;
 	}
 
-	/*
-	 * The component registers for an RCD might come from the
-	 * host-bridge RCRB if they are not already mapped via the
-	 * typical register locator mechanism.
-	 */
-	if (parent_dport->rch && cxlds->component_reg_phys == CXL_RESOURCE_NONE)
-		cxlds->component_reg_phys =
-			cxl_rcd_component_reg_phys(&cxlmd->dev, parent_dport);
-
 	endpoint = devm_cxl_add_port(host, &cxlmd->dev,
				     cxlds->component_reg_phys,
				     parent_dport);
@@ -274,27 +274,66 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
 	return 0;
 }
 
+/*
+ * Assume that any RCIEP that emits the CXL memory expander class code
+ * is an RCD
+ */
+static bool is_cxl_restricted(struct pci_dev *pdev)
+{
+	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
+}
+
+static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
+				  struct cxl_register_map *map)
+{
+	struct cxl_port *port;
+	struct cxl_dport *dport;
+	resource_size_t component_reg_phys;
+
+	*map = (struct cxl_register_map) {
+		.dev = &pdev->dev,
+		.resource = CXL_RESOURCE_NONE,
+	};
+
+	port = cxl_pci_find_port(pdev, &dport);
+	if (!port)
+		return -EPROBE_DEFER;
+
+	component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);
+
+	put_device(&port->dev);
+
+	if (component_reg_phys == CXL_RESOURCE_NONE)
+		return -ENXIO;
+
+	map->resource = component_reg_phys;
+	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
+	return 0;
+}
+
 static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
 			      struct cxl_register_map *map)
 {
 	int rc;
 
 	rc = cxl_find_regblock(pdev, type, map);
+
+	/*
+	 * If the Register Locator DVSEC does not exist, check if it
+	 * is an RCH and try to extract the Component Registers from
+	 * an RCRB.
+	 */
+	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
+		rc = cxl_rcrb_get_comp_regs(pdev, map);
+
 	if (rc)
 		return rc;
 
 	return cxl_setup_regs(map);
 }
 
-/*
- * Assume that any RCIEP that emits the CXL memory expander class code
- * is an RCD
- */
-static bool is_cxl_restricted(struct pci_dev *pdev)
-{
-	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
-}
-
 /*
  * CXL v3.0 6.2.3 Table 6-4
  * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
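The call site is outside this hunk. As a rough, hypothetical sketch only
(the helper and field names are taken from the driver, but this exact
probe-time snippet is an assumption and not part of this patch),
cxl_pci_probe() could now locate the component register block early like
this:

	struct cxl_register_map map;
	int rc;

	/*
	 * Hypothetical early probe-time lookup: via the Register Locator
	 * DVSEC, or the RCRB fallback for an RCD (assumed usage).
	 */
	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		return rc;

	cxlds->component_reg_phys = map.resource;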