Commit 59b748cd authored by Bjorn Helgaas

Merge branch 'pci/crs'

- Wait for device readiness after reset by polling Vendor ID and looking
  for Configuration RRS instead of polling the Command register and looking
  for non-error completions (Bjorn Helgaas)

- Fix an aardvark issue with emulating Configuration RRS for two-byte reads
  of Vendor ID; previously it only worked for four-byte reads (Bjorn
  Helgaas)

- Rename CRS Completion Status to RRS to match spec usage (Bjorn Helgaas)

* pci/crs:
  PCI: Rename CRS Completion Status to RRS
  PCI: aardvark: Correct Configuration RRS checking
  PCI: Wait for device readiness with Configuration RRS
parents 1a346559 87f10faf
@@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
 }
 
 /* If the root port is capable of returning Config Request
- * Retry Status (CRS) Completion Status to software then
+ * Retry Status (RRS) Completion Status to software then
  * enable the feature.
  */
 static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
@@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
                                        NULL);
        root_cap = cap_ptr + PCI_EXP_RTCAP;
        bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
-       if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
-               /* Enable CRS software visibility */
+       if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) {
+               /* Enable Configuration RRS Software Visibility */
                root_ctrl = cap_ptr + PCI_EXP_RTCTL;
-               val16 = PCI_EXP_RTCTL_CRSSVE;
+               val16 = PCI_EXP_RTCTL_RRS_SVE;
                bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
                                        sizeof(u16));
@@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
         * 100 ms wait time from the end of Reset. If the device is
         * not done with its internal initialization, it must at
         * least return a completion TLP, with a completion status
-        * of "Configuration Request Retry Status (CRS)". The root
+        * of "Configuration Request Retry Status (RRS)". The root
         * complex must complete the request to the host by returning
         * a read-data value of 0001h for the Vendor ID field and
         * all 1s for any additional bytes included in the request.
...
@@ -183,11 +183,11 @@
 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
 #define MSIX_ADDR_MATCH_LOW_OFF 0x940
 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -907,11 +907,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
-       /* Enable as 0xFFFF0001 response for CRS */
+       /* Enable as 0xFFFF0001 response for RRS */
        val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
-       val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
-       val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
-               AMBA_ERROR_RESPONSE_CRS_SHIFT);
+       val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+       val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+               AMBA_ERROR_RESPONSE_RRS_SHIFT);
        dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
        /* Clear Slot Clock Configuration bit if SRNS configuration */
...
@@ -50,7 +50,7 @@
 #define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
 #define PIO_COMPLETION_STATUS_OK 0
 #define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
 #define PIO_COMPLETION_STATUS_CA 4
 #define PIO_NON_POSTED_REQ BIT(10)
 #define PIO_ERR_STATUS BIT(11)
@@ -262,7 +262,7 @@ enum {
 #define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
 struct advk_pcie {
        struct platform_device *pdev;
@@ -649,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
        advk_pcie_train_link(pcie);
 }
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
 {
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
@@ -669,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
         * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
         *    means a PIO write error, and for PIO read it is successful with
         *    a read value of 0xFFFFFFFF.
-        * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+        * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
         *    only means a PIO write error, and for PIO read it is successful
         *    with a read value of 0xFFFF0001.
         * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -694,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
                strcomp_status = "UR";
                ret = -EOPNOTSUPP;
                break;
-       case PIO_COMPLETION_STATUS_CRS:
-               if (allow_crs && val) {
-                       /* PCIe r4.0, sec 2.3.2, says:
-                        * If CRS Software Visibility is enabled:
+       case PIO_COMPLETION_STATUS_RRS:
+               if (allow_rrs && val) {
+                       /* PCIe r6.0, sec 2.3.2, says:
+                        * If Configuration RRS Software Visibility is enabled:
                         * For a Configuration Read Request that includes both
                         * bytes of the Vendor ID field of a device Function's
                         * Configuration Space Header, the Root Complex must
@@ -706,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
                         * all '1's for any additional bytes included in the
                         * request.
                         *
-                        * So CRS in this case is not an error status.
+                        * So RRS in this case is not an error status.
                         */
-                       *val = CFG_RD_CRS_VAL;
+                       *val = CFG_RD_RRS_VAL;
                        strcomp_status = NULL;
                        ret = 0;
                        break;
                }
-               /* PCIe r4.0, sec 2.3.2, says:
-                * If CRS Software Visibility is not enabled, the Root Complex
+               /* PCIe r6.0, sec 2.3.2, says:
+                * If RRS Software Visibility is not enabled, the Root Complex
                 * must re-issue the Configuration Request as a new Request.
-                * If CRS Software Visibility is enabled: For a Configuration
+                * If RRS Software Visibility is enabled: For a Configuration
                 * Write Request or for any other Configuration Read Request,
                 * the Root Complex must re-issue the Configuration Request as
                 * a new Request.
                 * A Root Complex implementation may choose to limit the number
-                * of Configuration Request/CRS Completion Status loops before
+                * of Configuration Request/RRS Completion Status loops before
                 * determining that something is wrong with the target of the
                 * Request and taking appropriate action, e.g., complete the
                 * Request to the host as a failed transaction.
@@ -729,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
                 * So return -EAGAIN and caller (pci-aardvark.c driver) will
                 * re-issue request again up to the PIO_RETRY_CNT retries.
                 */
-               strcomp_status = "CRS";
+               strcomp_status = "RRS";
                ret = -EAGAIN;
                break;
        case PIO_COMPLETION_STATUS_CA:
@@ -920,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
        case PCI_EXP_RTCTL: {
                u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
-               /* Only emulation of PMEIE and CRSSVE bits is provided */
-               rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+               /* Only emulation of PMEIE and RRS_SVE bits is provided */
+               rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
                bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
                break;
        }
@@ -1075,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
        bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
        /* Indicates supports for Completion Retry Status */
-       bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+       bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
        bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
        bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1141,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 {
        struct advk_pcie *pcie = bus->sysdata;
        int retry_count;
-       bool allow_crs;
+       bool allow_rrs;
        u32 reg;
        int ret;
@@ -1153,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                        size, val);
        /*
-        * Completion Retry Status is possible to return only when reading all
-        * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
-        * CRSSVE flag on Root Bridge is enabled.
+        * Configuration Request Retry Status (RRS) is possible to return
+        * only when reading both bytes from PCI_VENDOR_ID at once and
+        * RRS_SVE flag on Root Port is enabled.
         */
-       allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+       allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
                    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
-                    PCI_EXP_RTCTL_CRSSVE);
+                    PCI_EXP_RTCTL_RRS_SVE);
        if (advk_pcie_pio_is_running(pcie))
-               goto try_crs;
+               goto try_rrs;
        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
@@ -1189,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
-                       goto try_crs;
+                       goto try_rrs;
                retry_count += ret;
                /* Check PIO status and get the read result */
-               ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+               ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
        if (ret < 0)
@@ -1207,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
        return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
        /*
-        * If it is possible, return Completion Retry Status so that caller
-        * tries to issue the request again instead of failing.
+        * If it is possible, return Configuration Request Retry Status so
+        * that caller tries to issue the request again instead of failing.
         */
-       if (allow_crs) {
-               *val = CFG_RD_CRS_VAL;
+       if (allow_rrs) {
+               *val = CFG_RD_RRS_VAL;
                return PCIBIOS_SUCCESSFUL;
        }
...
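Note on the two-byte fix above: the PIO path stores the synthesized RRS value (CFG_RD_RRS_VAL, 0xffff0001) into the full 32-bit result, and the config accessor then extracts only the bytes the caller asked for, so a 2-byte Vendor ID read sees the 0x0001 sentinel just as a 4-byte read does. The following is an illustrative userspace sketch only, not part of the patch; the helper name extract() is hypothetical, and the byte extraction mirrors the generic pattern visible in the next file's hunk.

    /* Sketch: why "size >= 2" is now a sufficient condition for RRS. */
    #include <assert.h>
    #include <stdint.h>

    #define CFG_RD_RRS_VAL 0xffff0001u

    /* Extract the bytes a config read of 'size' at offset 'where' returns. */
    static uint32_t extract(uint32_t dword, int where, int size)
    {
            if (size == 4)
                    return dword;   /* full dword, no masking needed */
            return (dword >> (8 * (where & 3))) & ((1u << (size * 8)) - 1);
    }

    int main(void)
    {
            /* 4-byte read of Vendor + Device ID: full RRS sentinel */
            assert(extract(CFG_RD_RRS_VAL, 0x00, 4) == 0xffff0001u);
            /* 2-byte read of just the Vendor ID: 0x0001 (PCI_VENDOR_ID_PCI_SIG) */
            assert(extract(CFG_RD_RRS_VAL, 0x00, 2) == 0x0001);
            /* 2-byte read of the Device ID half: all 1s, per the spec text above */
            assert(extract(CFG_RD_RRS_VAL, 0x02, 2) == 0xffff);
            return 0;
    }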
@@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
        /*
         * The v1 controller has a bug in its Configuration Request Retry
-        * Status (CRS) logic: when CRS Software Visibility is enabled and
+        * Status (RRS) logic: when RRS Software Visibility is enabled and
         * we read the Vendor and Device ID of a non-existent device, the
         * controller fabricates return data of 0xFFFF0001 ("device exists
         * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
         * ("device does not exist"). This causes the PCI core to retry
         * the read until it times out. Avoid this by not claiming to
-        * support CRS SV.
+        * support RRS SV.
         */
        if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
            ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
-               *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+               *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
        if (size <= 2)
                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
...
@@ -54,7 +54,7 @@
 #define CFG_RD_SUCCESS 0
 #define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
 #define CFG_RD_CA 3
 #define CFG_RETRY_STATUS 0xffff0001
 #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
        u32 status;
        /*
-        * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+        * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
         * affects config reads of the Vendor ID. For config writes or any
         * other config reads, the Root may automatically reissue the
         * configuration request again as a new request.
         *
         * For config reads, this hardware returns CFG_RETRY_STATUS data
-        * when it receives a CRS completion, regardless of the address of
-        * the read or the CRS Software Visibility Enable bit. As a
+        * when it receives a RRS completion, regardless of the address of
+        * the read or the RRS Software Visibility Enable bit. As a
         * partial workaround for this, we retry in software any read that
         * returns CFG_RETRY_STATUS.
         *
         * Note that a non-Vendor ID config register may have a value of
         * CFG_RETRY_STATUS. If we read that, we can't distinguish it from
-        * a CRS completion, so we will incorrectly retry the read and
+        * a RRS completion, so we will incorrectly retry the read and
         * eventually return the wrong data (0xffffffff).
         */
        data = readl(cfg_data_p);
        while (data == CFG_RETRY_STATUS && timeout--) {
                /*
-                * CRS state is set in CFG_RD status register
+                * RRS state is set in CFG_RD status register
                 * This will handle the case where CFG_RETRY_STATUS is
                 * valid config data.
                 */
                status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
-               if (status != CFG_RD_CRS)
+               if (status != CFG_RD_RRS)
                        return data;
                udelay(1);
@@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
                break;
        case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
-               /* Don't advertise CRS SV support */
-               *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+               /* Don't advertise RRS SV support */
+               *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
                break;
        default:
...
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
                 */
                .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
                       PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
-                      PCI_EXP_RTCTL_CRSSVE),
-               .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+                      PCI_EXP_RTCTL_RRS_SVE),
+               .ro = PCI_EXP_RTCAP_RRS_SV << 16,
        },
        [PCI_EXP_RTSTA / 4] = {
...
@@ -1283,7 +1283,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
 {
        int delay = 1;
        bool retrain = false;
-       struct pci_dev *bridge;
+       struct pci_dev *root, *bridge;
+
+       root = pcie_find_root_port(dev);
 
        if (pci_is_pcie(dev)) {
                bridge = pci_upstream_bridge(dev);
@@ -1292,16 +1294,23 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
        }
 
        /*
-        * After reset, the device should not silently discard config
-        * requests, but it may still indicate that it needs more time by
-        * responding to them with CRS completions. The Root Port will
-        * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
-        * the read (except when CRS SV is enabled and the read was for the
-        * Vendor ID; in that case it synthesizes 0x0001 data).
+        * The caller has already waited long enough after a reset that the
+        * device should respond to config requests, but it may respond
+        * with Request Retry Status (RRS) if it needs more time to
+        * initialize.
+        *
+        * If the device is below a Root Port with Configuration RRS
+        * Software Visibility enabled, reading the Vendor ID returns a
+        * special data value if the device responded with RRS. Read the
+        * Vendor ID until we get non-RRS status.
         *
-        * Wait for the device to return a non-CRS completion. Read the
-        * Command register instead of Vendor ID so we don't have to
-        * contend with the CRS SV value.
+        * If there's no Root Port or Configuration RRS Software Visibility
+        * is not enabled, the device may still respond with RRS, but
+        * hardware may retry the config request. If no retries receive
+        * Successful Completion, hardware generally synthesizes ~0
+        * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+        * ID for VFs and non-existent devices also returns ~0, so read the
+        * Command register until it returns something other than ~0.
         */
        for (;;) {
                u32 id;
@@ -1311,9 +1320,15 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
                        return -ENOTTY;
                }
 
-               pci_read_config_dword(dev, PCI_COMMAND, &id);
-               if (!PCI_POSSIBLE_ERROR(id))
-                       break;
+               if (root && root->config_rrs_sv) {
+                       pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+                       if (!pci_bus_rrs_vendor_id(id))
+                               break;
+               } else {
+                       pci_read_config_dword(dev, PCI_COMMAND, &id);
+                       if (!PCI_POSSIBLE_ERROR(id))
+                               break;
+               }
 
                if (delay > timeout) {
                        pci_warn(dev, "not ready %dms after %s; giving up\n",
...
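For readers skimming the hunk above, the new readiness poll boils down to choosing which register to probe. The following is a condensed model only, not the kernel's code: pci_dev_seems_ready() is a hypothetical helper name, the timeout bookkeeping and link-retrain handling of the real pci_dev_wait() are omitted, and pci_bus_rrs_vendor_id() is the internal helper added to drivers/pci/pci.h in a later hunk.

    /* Sketch: which register the post-reset poll reads, after this change. */
    #include <linux/pci.h>
    #include "pci.h"        /* drivers/pci/pci.h, for pci_bus_rrs_vendor_id() */

    static bool pci_dev_seems_ready(struct pci_dev *dev, struct pci_dev *root)
    {
            u32 id;

            if (root && root->config_rrs_sv) {
                    /* Root Port reports RRS via the reserved Vendor ID 0x0001 */
                    pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
                    return !pci_bus_rrs_vendor_id(id);
            }

            /* No RRS visibility: fall back to polling the Command register */
            pci_read_config_dword(dev, PCI_COMMAND, &id);
            return !PCI_POSSIBLE_ERROR(id);
    }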
@@ -139,6 +139,11 @@ bool pci_bridge_d3_possible(struct pci_dev *dev);
 void pci_bridge_d3_update(struct pci_dev *dev);
 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
 
+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+       return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
        /* Wait 100 ms before the system can be put into a sleep state. */
@@ -290,10 +295,10 @@ void pci_put_host_bridge_device(struct device *dev);
 int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
-                               int crs_timeout);
+                               int rrs_timeout);
 bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
-                                       int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+                                       int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
 int pci_setup_device(struct pci_dev *dev);
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
...
@@ -1203,15 +1203,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 }
 EXPORT_SYMBOL(pci_add_new_bus);
 
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
 {
        u16 root_cap = 0;
 
-       /* Enable CRS Software Visibility if supported */
+       /* Enable Configuration RRS Software Visibility if supported */
        pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
-       if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+       if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
                pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
-                                        PCI_EXP_RTCTL_CRSSVE);
+                                        PCI_EXP_RTCTL_RRS_SVE);
+               pdev->config_rrs_sv = 1;
+       }
 }
 
 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1326,7 +1328,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
                                      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
 
-               pci_enable_crs(dev);
+               pci_enable_rrs_sv(dev);
 
                if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
                    !is_cardbus && !broken) {
@@ -2343,28 +2345,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_alloc_dev);
 
-static bool pci_bus_crs_vendor_id(u32 l)
-{
-       return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
                             int timeout)
 {
        int delay = 1;
 
-       if (!pci_bus_crs_vendor_id(*l))
-               return true;    /* not a CRS completion */
+       if (!pci_bus_rrs_vendor_id(*l))
+               return true;    /* not a Configuration RRS completion */
 
        if (!timeout)
-               return false;   /* CRS, but caller doesn't want to wait */
+               return false;   /* RRS, but caller doesn't want to wait */
 
        /*
         * We got the reserved Vendor ID that indicates a completion with
-        * Configuration Request Retry Status (CRS). Retry until we get a
+        * Configuration Request Retry Status (RRS). Retry until we get a
         * valid Vendor ID or we time out.
         */
-       while (pci_bus_crs_vendor_id(*l)) {
+       while (pci_bus_rrs_vendor_id(*l)) {
                if (delay > timeout) {
                        pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
                                pci_domain_nr(bus), bus->number,
@@ -2403,8 +2400,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
            *l == 0x0000ffff || *l == 0xffff0000)
                return false;
 
-       if (pci_bus_crs_vendor_id(*l))
-               return pci_bus_wait_crs(bus, devfn, l, timeout);
+       if (pci_bus_rrs_vendor_id(*l))
+               return pci_bus_wait_rrs(bus, devfn, l, timeout);
 
        return true;
 }
...
@@ -203,7 +203,7 @@ struct pci_dev;
 #define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
 
 /* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
 
 struct bcma_drv_pci;
 struct bcma_bus;
...
@@ -371,6 +371,7 @@ struct pci_dev {
                                           can be generated */
        unsigned int pme_poll:1;        /* Poll device's PME status bit */
        unsigned int pinned:1;          /* Whether this dev is pinned */
+       unsigned int config_rrs_sv:1;   /* Config RRS software visibility */
        unsigned int imm_ready:1;       /* Supports Immediate Readiness */
        unsigned int d1_support:1;      /* Low power state D1 is supported */
        unsigned int d2_support:1;      /* Low power state D2 is supported */
...
@@ -634,9 +634,11 @@
 #define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
 #define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
 #define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */
 #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
-#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
+#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */
+#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */
 #define PCI_EXP_RTSTA 0x20 /* Root Status */
 #define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
 #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
...
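The uapi header keeps the old CRS macro names as aliases of the new RRS names, so existing users of PCI_EXP_RTCTL_CRSSVE and PCI_EXP_RTCAP_CRSVIS continue to build unchanged. The compile-time check below is an illustration only, not part of the patch, and assumes the installed uapi header <linux/pci_regs.h>.

    /* Sketch: the legacy CRS names remain usable as aliases of the RRS names. */
    #include <assert.h>
    #include <linux/pci_regs.h>

    static_assert(PCI_EXP_RTCTL_CRSSVE == PCI_EXP_RTCTL_RRS_SVE,
                  "RTCTL alias preserved");
    static_assert(PCI_EXP_RTCAP_CRSVIS == PCI_EXP_RTCAP_RRS_SV,
                  "RTCAP alias preserved");

    int main(void) { return 0; }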