Commit 27e76d06 authored by Bjorn Helgaas

Merge branch 'remotes/lorenzo/pci/aardvark'

- Define macros for PCI_EXP_DEVCTL_PAYLOAD_* (Pali Rohár)

- Set Max Payload Size to 512 bytes per Marvell spec (Pali Rohár); see the
  usage sketch after this list

- Downgrade PIO Response Status messages to debug level (Marek Behún)

- Preserve CRS SV (Configuration Request Retry Status Software Visibility)
  bit in emulated Root Control register (Pali Rohár)

- Fix issue in configuring reference clock (Pali Rohár)

- Don't clear status bits for masked interrupts (Pali Rohár)

- Don't mask unused interrupts (Pali Rohár)

- Avoid code repetition in advk_pcie_rd_conf() (Marek Behún)

- Retry config accesses on CRS response (Pali Rohár); a condensed sketch of
  the retry loop precedes the diff below

- Simplify emulated Root Capabilities initialization (Pali Rohár)

- Fix several link training issues (Pali Rohár)

- Fix link-up checking via LTSSM (Pali Rohár)

- Fix reporting of Data Link Layer Link Active (Pali Rohár)

- Fix emulation of W1C bits (Marek Behún)

- Fix MSI domain .alloc() method to return zero on success (Marek Behún)

- Read entire 16-bit MSI vector in MSI handler, not just low 8 bits (Marek
  Behún)

- Clear Root Port I/O Space, Memory Space, and Bus Master Enable bits at
  startup; PCI core will set those as necessary (Pali Rohár)

- When operating as a Root Port, set class code to "PCI Bridge" instead of
  the default "Mass Storage Controller" (Pali Rohár)

- Add emulation for PCI_BRIDGE_CTL_BUS_RESET since aardvark doesn't
  implement this per spec (Pali Rohár)

- Add emulation of option ROM BAR since aardvark doesn't implement this per
  spec (Pali Rohár)
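
A minimal sketch of how the new PCI_EXP_DEVCTL_PAYLOAD_* values are meant to
be used, mirroring the advk_pcie_setup_hw() change in the diff below. This is
a standalone user-space illustration, not driver code: the starting devctl
value is invented for the demo, and the macros are only present in a uapi
pci_regs.h that carries this series.

#include <stdio.h>
#include <linux/pci_regs.h>	/* provides PCI_EXP_DEVCTL_PAYLOAD_* with this series applied */

int main(void)
{
	/* Pretend this value was read back from the PCIe Device Control register. */
	unsigned int devctl = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_PAYLOAD_128B;

	devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;	/* clear the 3-bit Max_Payload_Size field */
	devctl |= PCI_EXP_DEVCTL_PAYLOAD_512B;	/* select 512 bytes, as the driver now does */

	printf("DEVCTL = %#06x (MPS bits = %#x)\n",
	       devctl, devctl & PCI_EXP_DEVCTL_PAYLOAD);
	return 0;
}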

* remotes/lorenzo/pci/aardvark:
  PCI: aardvark: Fix support for PCI_ROM_ADDRESS1 on emulated bridge
  PCI: aardvark: Fix support for PCI_BRIDGE_CTL_BUS_RESET on emulated bridge
  PCI: aardvark: Set PCI Bridge Class Code to PCI Bridge
  PCI: aardvark: Fix support for bus mastering and PCI_COMMAND on emulated bridge
  PCI: aardvark: Read all 16-bits from PCIE_MSI_PAYLOAD_REG
  PCI: aardvark: Fix return value of MSI domain .alloc() method
  PCI: pci-bridge-emul: Fix emulation of W1C bits
  PCI: aardvark: Fix reporting Data Link Layer Link Active
  PCI: aardvark: Fix checking for link up via LTSSM state
  PCI: aardvark: Fix link training
  PCI: aardvark: Simplify initialization of rootcap on virtual bridge
  PCI: aardvark: Implement re-issuing config requests on CRS response
  PCI: aardvark: Deduplicate code in advk_pcie_rd_conf()
  PCI: aardvark: Do not unmask unused interrupts
  PCI: aardvark: Do not clear status bits of masked interrupts
  PCI: aardvark: Fix configuring Reference clock
  PCI: aardvark: Fix preserving PCI_EXP_RTCTL_CRSSVE flag on emulated bridge
  PCI: aardvark: Don't spam about PIO Response Status
  PCI: aardvark: Fix PCIe Max Payload Size setting
  PCI: Add PCI_EXP_DEVCTL_PAYLOAD_* macros
parents 78be29ab 239edf68
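
Before the diff itself, a condensed, self-contained sketch of the bounded
retry policy that advk_pcie_rd_conf()/advk_pcie_wr_conf() adopt below. Only
the do/while structure mirrors the driver; fake_config_read(), the attempt
counter, and the printed values are invented for the illustration, and
RETRY_BUDGET is a stand-in for the driver's own PIO_RETRY_CNT.

#include <errno.h>
#include <stdio.h>

#define RETRY_BUDGET	750	/* stand-in for the driver's PIO_RETRY_CNT */

static int crs_left = 2;	/* pretend the device answers CRS twice */

/* Stand-in for one PIO config read: -EAGAIN models a CRS completion. */
static int fake_config_read(unsigned int *val)
{
	if (crs_left-- > 0)
		return -EAGAIN;
	*val = 0x11ab;		/* demo value only */
	return 0;
}

int main(void)
{
	unsigned int val = 0;
	int retry_count = 0;
	int ret;

	do {
		ret = fake_config_read(&val);
		retry_count++;	/* the driver adds the PIO wait count here */
	} while (ret == -EAGAIN && retry_count < RETRY_BUDGET);

	printf("ret=%d val=%#x after %d attempt(s)\n", ret, val, retry_count);
	return ret ? 1 : 0;
}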
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -31,10 +31,8 @@
 /* PCIe core registers */
 #define PCIE_CORE_DEV_ID_REG 0x0
 #define PCIE_CORE_CMD_STATUS_REG 0x4
-#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
-#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
-#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
 #define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_EXP_ROM_BAR_REG 0x30
 #define PCIE_CORE_PCIEXP_CAP 0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG 0x118
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
@@ -99,6 +97,7 @@
 #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
 #define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
 #define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
+#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
 #define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
 #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
 #define PCIE_MSG_PM_PME_MASK BIT(7)
@@ -106,18 +105,19 @@
 #define PCIE_ISR0_MSI_INT_PENDING BIT(24)
 #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
 #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
-#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
+#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
 #define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
 #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
 #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
 #define PCIE_ISR1_FLUSH BIT(5)
 #define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
-#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
+#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
 #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
 #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
 #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
 #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
 #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+#define PCIE_MSI_DATA_MASK GENMASK(15, 0)

 /* PCIe window configuration */
 #define OB_WIN_BASE_ADDR 0x4c00
@@ -164,8 +164,50 @@
 #define CFG_REG (LMI_BASE_ADDR + 0x0)
 #define LTSSM_SHIFT 24
 #define LTSSM_MASK 0x3f
-#define LTSSM_L0 0x10
 #define RC_BAR_CONFIG 0x300
+
+/* LTSSM values in CFG_REG */
+enum {
+	LTSSM_DETECT_QUIET = 0x0,
+	LTSSM_DETECT_ACTIVE = 0x1,
+	LTSSM_POLLING_ACTIVE = 0x2,
+	LTSSM_POLLING_COMPLIANCE = 0x3,
+	LTSSM_POLLING_CONFIGURATION = 0x4,
+	LTSSM_CONFIG_LINKWIDTH_START = 0x5,
+	LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
+	LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
+	LTSSM_CONFIG_LANENUM_WAIT = 0x8,
+	LTSSM_CONFIG_COMPLETE = 0x9,
+	LTSSM_CONFIG_IDLE = 0xa,
+	LTSSM_RECOVERY_RCVR_LOCK = 0xb,
+	LTSSM_RECOVERY_SPEED = 0xc,
+	LTSSM_RECOVERY_RCVR_CFG = 0xd,
+	LTSSM_RECOVERY_IDLE = 0xe,
+	LTSSM_L0 = 0x10,
+	LTSSM_RX_L0S_ENTRY = 0x11,
+	LTSSM_RX_L0S_IDLE = 0x12,
+	LTSSM_RX_L0S_FTS = 0x13,
+	LTSSM_TX_L0S_ENTRY = 0x14,
+	LTSSM_TX_L0S_IDLE = 0x15,
+	LTSSM_TX_L0S_FTS = 0x16,
+	LTSSM_L1_ENTRY = 0x17,
+	LTSSM_L1_IDLE = 0x18,
+	LTSSM_L2_IDLE = 0x19,
+	LTSSM_L2_TRANSMIT_WAKE = 0x1a,
+	LTSSM_DISABLED = 0x20,
+	LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
+	LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
+	LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
+	LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
+	LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
+	LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
+	LTSSM_HOT_RESET = 0x27,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
+};
+
 #define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)

 /* PCIe core controller registers */
@@ -198,7 +240,7 @@
 #define PCIE_IRQ_MSI_INT2_DET BIT(21)
 #define PCIE_IRQ_RC_DBELL_DET BIT(22)
 #define PCIE_IRQ_EP_STATUS BIT(23)
-#define PCIE_IRQ_ALL_MASK 0xfff0fb
+#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
 #define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT

 /* Transaction types */
@@ -257,18 +299,49 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
 	return readl(pcie->base + reg);
 }

-static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
-{
-	return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
-}
-
-static int advk_pcie_link_up(struct advk_pcie *pcie)
-{
-	u32 val, ltssm_state;
-
-	val = advk_readl(pcie, CFG_REG);
-	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
-	return ltssm_state >= LTSSM_L0;
-}
+static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
+{
+	u32 val;
+	u8 ltssm_state;
+
+	val = advk_readl(pcie, CFG_REG);
+	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+	return ltssm_state;
+}
+
+static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
+{
+	/* check if LTSSM is in normal operation - some L* state */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
+{
+	/*
+	 * According to PCIe Base specification 3.0, Table 4-14: Link
+	 * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle
+	 * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0,
+	 * L0s, L1 and L2 states. And according to 3.2.1. Data Link
+	 * Control and Management State Machine Rules is DL Up status
+	 * reported in DL Active state.
+	 */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
+{
+	/*
+	 * According to PCIe Base specification 3.0, Table 4-14: Link
+	 * Status Mapped to the LTSSM is Link Training mapped to LTSSM
+	 * Configuration and Recovery states.
+	 */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
+		 ltssm_state < LTSSM_L0) ||
+		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
+		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
+}

 static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
@@ -291,7 +364,7 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
 	size_t retries;

 	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
-		if (!advk_pcie_link_up(pcie))
+		if (advk_pcie_link_training(pcie))
 			break;
 		udelay(RETRAIN_WAIT_USLEEP_US);
 	}
@@ -299,23 +372,9 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)

 static void advk_pcie_issue_perst(struct advk_pcie *pcie)
 {
-	u32 reg;
-
 	if (!pcie->reset_gpio)
 		return;

-	/*
-	 * As required by PCI Express spec (PCI Express Base Specification, REV.
-	 * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay
-	 * for at least 100ms after de-asserting PERST# signal is needed before
-	 * link training is enabled. So ensure that link training is disabled
-	 * prior de-asserting PERST# signal to fulfill that PCI Express spec
-	 * requirement.
-	 */
-	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
-	reg &= ~LINK_TRAINING_EN;
-	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
 	/* 10ms delay is needed for some cards */
 	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
 	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
@@ -323,53 +382,46 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie)
 	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
 }

-static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
-{
-	int ret, neg_gen;
-	u32 reg;
-
-	/* Setup link speed */
-	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
-	reg &= ~PCIE_GEN_SEL_MSK;
-	if (gen == 3)
-		reg |= SPEED_GEN_3;
-	else if (gen == 2)
-		reg |= SPEED_GEN_2;
-	else
-		reg |= SPEED_GEN_1;
-	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
-	/*
-	 * Enable link training. This is not needed in every call to this
-	 * function, just once suffices, but it does not break anything either.
-	 */
-	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
-	reg |= LINK_TRAINING_EN;
-	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
-	/*
-	 * Start link training immediately after enabling it.
-	 * This solves problems for some buggy cards.
-	 */
-	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
-	reg |= PCI_EXP_LNKCTL_RL;
-	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
-
-	ret = advk_pcie_wait_for_link(pcie);
-	if (ret)
-		return ret;
-
-	reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
-	neg_gen = reg & PCI_EXP_LNKSTA_CLS;
-
-	return neg_gen;
-}
-
-static void advk_pcie_train_link(struct advk_pcie *pcie)
-{
-	struct device *dev = &pcie->pdev->dev;
-	int neg_gen = -1, gen;
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	u32 reg;
+	int ret;
+
+	/*
+	 * Setup PCIe rev / gen compliance based on device tree property
+	 * 'max-link-speed' which also forces maximal link speed.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg &= ~PCIE_GEN_SEL_MSK;
+	if (pcie->link_gen == 3)
+		reg |= SPEED_GEN_3;
+	else if (pcie->link_gen == 2)
+		reg |= SPEED_GEN_2;
+	else
+		reg |= SPEED_GEN_1;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/*
+	 * Set maximal link speed value also into PCIe Link Control 2 register.
+	 * Armada 3700 Functional Specification says that default value is based
+	 * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+	reg &= ~PCI_EXP_LNKCTL2_TLS;
+	if (pcie->link_gen == 3)
+		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+	else if (pcie->link_gen == 2)
+		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+	else
+		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+
+	/* Enable link training after selecting PCIe generation */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg |= LINK_TRAINING_EN;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

 	/*
 	 * Reset PCIe card via PERST# signal. Some cards are not detected
 	 * during link training when they are in some non-initial state.
@@ -380,41 +432,18 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
 	 * PERST# signal could have been asserted by pinctrl subsystem before
 	 * probe() callback has been called or issued explicitly by reset gpio
 	 * function advk_pcie_issue_perst(), making the endpoint going into
-	 * fundamental reset. As required by PCI Express spec a delay for at
-	 * least 100ms after such a reset before link training is needed.
+	 * fundamental reset. As required by PCI Express spec (PCI Express
+	 * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
+	 * Conventional Reset) a delay for at least 100ms after such a reset
+	 * before sending a Configuration Request to the device is needed.
+	 * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
+	 * waits for link at least 900ms.
 	 */
-	msleep(PCI_PM_D3COLD_WAIT);
-
-	/*
-	 * Try link training at link gen specified by device tree property
-	 * 'max-link-speed'. If this fails, iteratively train at lower gen.
-	 */
-	for (gen = pcie->link_gen; gen > 0; --gen) {
-		neg_gen = advk_pcie_train_at_gen(pcie, gen);
-		if (neg_gen > 0)
-			break;
-	}
-
-	if (neg_gen < 0)
-		goto err;
-
-	/*
-	 * After successful training if negotiated gen is lower than requested,
-	 * train again on negotiated gen. This solves some stability issues for
-	 * some buggy gen1 cards.
-	 */
-	if (neg_gen < gen) {
-		gen = neg_gen;
-		neg_gen = advk_pcie_train_at_gen(pcie, gen);
-	}
-
-	if (neg_gen == gen) {
-		dev_info(dev, "link up at gen %i\n", gen);
-		return;
-	}
-
-err:
-	dev_err(dev, "link never came up\n");
+	ret = advk_pcie_wait_for_link(pcie);
+	if (ret < 0)
+		dev_err(dev, "link never came up\n");
+	else
+		dev_info(dev, "link up\n");
 }

 /*
@@ -451,9 +480,15 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	u32 reg;
 	int i;

-	/* Enable TX */
+	/*
+	 * Configure PCIe Reference clock. Direction is from the PCIe
+	 * controller to the endpoint card, so enable transmitting of
+	 * Reference clock differential signal off-chip and disable
+	 * receiving off-chip differential signal.
+	 */
 	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
 	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+	reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
 	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

 	/* Set to Direct mode */
@@ -477,6 +512,31 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
 	advk_writel(pcie, reg, VENDOR_ID_REG);

+	/*
+	 * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
+	 * because the default value is Mass storage controller (0x010400).
+	 *
+	 * Note that this Aardvark PCI Bridge does not have compliant Type 1
+	 * Configuration Space and it even cannot be accessed via Aardvark's
+	 * PCI config space access method. Something like config space is
+	 * available in internal Aardvark registers starting at offset 0x0
+	 * and is reported as Type 0. In range 0x10 - 0x34 it has totally
+	 * different registers.
+	 *
+	 * Therefore driver uses emulation of PCI Bridge which emulates
+	 * access to configuration space via internal Aardvark registers or
+	 * emulated configuration buffer.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
+	reg &= ~0xffffff00;
+	reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
+
+	/* Disable Root Bridge I/O space, memory space and bus mastering */
+	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+	reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+
 	/* Set Advanced Error Capabilities and Control PF0 register */
 	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
 		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
@@ -488,8 +548,9 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
 	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
 	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+	reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
 	reg &= ~PCI_EXP_DEVCTL_READRQ;
-	reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
+	reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
 	reg |= PCI_EXP_DEVCTL_READRQ_512B;
 	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
@@ -574,19 +635,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 		advk_pcie_disable_ob_win(pcie, i);

 	advk_pcie_train_link(pcie);
-
-	/*
-	 * FIXME: The following register update is suspicious. This register is
-	 * applicable only when the PCI controller is configured for Endpoint
-	 * mode, not as a Root Complex. But apparently when this code is
-	 * removed, some cards stop working. This should be investigated and
-	 * a comment explaining this should be put here.
-	 */
-	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
-	reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
-		PCIE_CORE_CMD_IO_ACCESS_EN |
-		PCIE_CORE_CMD_MEM_IO_REQ_EN;
-	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
 }

 static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
@@ -595,6 +643,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 	u32 reg;
 	unsigned int status;
 	char *strcomp_status, *str_posted;
+	int ret;

 	reg = advk_readl(pcie, PIO_STAT);
 	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
@@ -619,6 +668,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 	case PIO_COMPLETION_STATUS_OK:
 		if (reg & PIO_ERR_STATUS) {
 			strcomp_status = "COMP_ERR";
+			ret = -EFAULT;
 			break;
 		}
 		/* Get the read result */
@@ -626,9 +676,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 			*val = advk_readl(pcie, PIO_RD_DATA);
 		/* No error */
 		strcomp_status = NULL;
+		ret = 0;
 		break;
 	case PIO_COMPLETION_STATUS_UR:
 		strcomp_status = "UR";
+		ret = -EOPNOTSUPP;
 		break;
 	case PIO_COMPLETION_STATUS_CRS:
 		if (allow_crs && val) {
@@ -646,6 +698,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 			 */
 			*val = CFG_RD_CRS_VAL;
 			strcomp_status = NULL;
+			ret = 0;
 			break;
 		}
 		/* PCIe r4.0, sec 2.3.2, says:
@@ -661,31 +714,34 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 		 * Request and taking appropriate action, e.g., complete the
 		 * Request to the host as a failed transaction.
 		 *
-		 * To simplify implementation do not re-issue the Configuration
-		 * Request and complete the Request as a failed transaction.
+		 * So return -EAGAIN and caller (pci-aardvark.c driver) will
+		 * re-issue request again up to the PIO_RETRY_CNT retries.
 		 */
 		strcomp_status = "CRS";
+		ret = -EAGAIN;
 		break;
 	case PIO_COMPLETION_STATUS_CA:
 		strcomp_status = "CA";
+		ret = -ECANCELED;
 		break;
 	default:
 		strcomp_status = "Unknown";
+		ret = -EINVAL;
 		break;
 	}

 	if (!strcomp_status)
-		return 0;
+		return ret;

 	if (reg & PIO_NON_POSTED_REQ)
 		str_posted = "Non-posted";
 	else
 		str_posted = "Posted";

-	dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
 		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

-	return -EFAULT;
+	return ret;
 }

 static int advk_pcie_wait_pio(struct advk_pcie *pcie)
@@ -693,13 +749,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 	struct device *dev = &pcie->pdev->dev;
 	int i;

-	for (i = 0; i < PIO_RETRY_CNT; i++) {
+	for (i = 1; i <= PIO_RETRY_CNT; i++) {
 		u32 start, isr;

 		start = advk_readl(pcie, PIO_START);
 		isr = advk_readl(pcie, PIO_ISR);
 		if (!start && isr)
-			return 0;
+			return i;
 		udelay(PIO_RETRY_DELAY);
 	}
@@ -707,6 +763,72 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 	return -ETIMEDOUT;
 }

+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+				    int reg, u32 *value)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_ROM_ADDRESS1:
+		*value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_INTERRUPT_LINE: {
+		/*
+		 * From the whole 32bit register we support reading from HW only
+		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+		 * Other bits are retrieved only from emulated config buffer.
+		 */
+		__le32 *cfgspace = (__le32 *)&bridge->conf;
+		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
+			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+		else
+			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+}
+
+static void
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+				     int reg, u32 old, u32 new, u32 mask)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+		break;
+
+	case PCI_ROM_ADDRESS1:
+		advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
+		break;
+
+	case PCI_INTERRUPT_LINE:
+		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+				val |= HOT_RESET_GEN;
+			else
+				val &= ~HOT_RESET_GEN;
+			advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
 static pci_bridge_emul_read_status_t
 advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
@@ -723,6 +845,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
 	case PCI_EXP_RTCTL: {
 		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
 		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+		*value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
 		*value |= PCI_EXP_RTCAP_CRSVIS << 16;
 		return PCI_BRIDGE_EMUL_HANDLED;
 	}
@@ -734,12 +857,26 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
 		return PCI_BRIDGE_EMUL_HANDLED;
 	}

+	case PCI_EXP_LNKCAP: {
+		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+		/*
+		 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
+		 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
+		 * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
+		 */
+		val |= PCI_EXP_LNKCAP_DLLLARC;
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
 	case PCI_EXP_LNKCTL: {
 		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
 		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
 			~(PCI_EXP_LNKSTA_LT << 16);
-		if (!advk_pcie_link_up(pcie))
+		if (advk_pcie_link_training(pcie))
 			val |= (PCI_EXP_LNKSTA_LT << 16);
+		if (advk_pcie_link_active(pcie))
+			val |= (PCI_EXP_LNKSTA_DLLLA << 16);
 		*value = val;
 		return PCI_BRIDGE_EMUL_HANDLED;
 	}
@@ -747,7 +884,6 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
 	case PCI_CAP_LIST_ID:
 	case PCI_EXP_DEVCAP:
 	case PCI_EXP_DEVCTL:
-	case PCI_EXP_LNKCAP:
 		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
 		return PCI_BRIDGE_EMUL_HANDLED;
 	default:
@@ -794,6 +930,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
 }

 static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+	.read_base = advk_pci_bridge_emul_base_conf_read,
+	.write_base = advk_pci_bridge_emul_base_conf_write,
 	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
 	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
 };
@@ -805,7 +943,6 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
 static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
 {
 	struct pci_bridge_emul *bridge = &pcie->bridge;
-	int ret;

 	bridge->conf.vendor =
 		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
@@ -825,19 +962,14 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
 	/* Support interrupt A for MSI feature */
 	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;

+	/* Indicates supports for Completion Retry Status */
+	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
 	bridge->has_pcie = true;
 	bridge->data = pcie;
 	bridge->ops = &advk_pci_bridge_emul_ops;

-	/* PCIe config space can be initialized after pci_bridge_emul_init() */
-	ret = pci_bridge_emul_init(bridge, 0);
-	if (ret < 0)
-		return ret;
-
-	/* Indicates supports for Completion Retry Status */
-	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
-
-	return 0;
+	return pci_bridge_emul_init(bridge, 0);
 }

 static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -889,6 +1021,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 			     int where, int size, u32 *val)
 {
 	struct advk_pcie *pcie = bus->sysdata;
+	int retry_count;
 	bool allow_crs;
 	u32 reg;
 	int ret;
@@ -911,18 +1044,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
 		     PCI_EXP_RTCTL_CRSSVE);

-	if (advk_pcie_pio_is_running(pcie)) {
-		/*
-		 * If it is possible return Completion Retry Status so caller
-		 * tries to issue the request again instead of failing.
-		 */
-		if (allow_crs) {
-			*val = CFG_RD_CRS_VAL;
-			return PCIBIOS_SUCCESSFUL;
-		}
-
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+	if (advk_pcie_pio_is_running(pcie))
+		goto try_crs;

 	/* Program the control register */
 	reg = advk_readl(pcie, PIO_CTRL);
@@ -941,30 +1064,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 	/* Program the data strobe */
 	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

-	/* Clear PIO DONE ISR and start the transfer */
-	advk_writel(pcie, 1, PIO_ISR);
-	advk_writel(pcie, 1, PIO_START);
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);

-	ret = advk_pcie_wait_pio(pcie);
-	if (ret < 0) {
-		/*
-		 * If it is possible return Completion Retry Status so caller
-		 * tries to issue the request again instead of failing.
-		 */
-		if (allow_crs) {
-			*val = CFG_RD_CRS_VAL;
-			return PCIBIOS_SUCCESSFUL;
-		}
-
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			goto try_crs;

-	/* Check PIO status and get the read result */
-	ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
-	if (ret < 0) {
-		*val = 0xffffffff;
-		return PCIBIOS_SET_FAILED;
-	}
+		retry_count += ret;
+
+		/* Check PIO status and get the read result */
+		ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	if (ret < 0)
+		goto fail;

 	if (size == 1)
 		*val = (*val >> (8 * (where & 3))) & 0xff;
@@ -972,6 +1089,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 		*val = (*val >> (8 * (where & 3))) & 0xffff;

 	return PCIBIOS_SUCCESSFUL;
+
+try_crs:
+	/*
+	 * If it is possible, return Completion Retry Status so that caller
+	 * tries to issue the request again instead of failing.
+	 */
+	if (allow_crs) {
+		*val = CFG_RD_CRS_VAL;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+fail:
+	*val = 0xffffffff;
+	return PCIBIOS_SET_FAILED;
 }

 static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
@@ -980,6 +1111,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	struct advk_pcie *pcie = bus->sysdata;
 	u32 reg;
 	u32 data_strobe = 0x0;
+	int retry_count;
 	int offset;
 	int ret;
@@ -1021,19 +1153,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	/* Program the data strobe */
 	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

-	/* Clear PIO DONE ISR and start the transfer */
-	advk_writel(pcie, 1, PIO_ISR);
-	advk_writel(pcie, 1, PIO_START);
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);

-	ret = advk_pcie_wait_pio(pcie);
-	if (ret < 0)
-		return PCIBIOS_SET_FAILED;
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			return PCIBIOS_SET_FAILED;

-	ret = advk_pcie_check_pio_status(pcie, false, NULL);
-	if (ret < 0)
-		return PCIBIOS_SET_FAILED;
+		retry_count += ret;

-	return PCIBIOS_SUCCESSFUL;
+		ret = advk_pcie_check_pio_status(pcie, false, NULL);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
 }

 static struct pci_ops advk_pcie_ops = {
@@ -1082,7 +1217,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
 				    domain->host_data, handle_simple_irq,
 				    NULL, NULL);

-	return hwirq;
+	return 0;
 }

 static void advk_msi_irq_domain_free(struct irq_domain *domain,
@@ -1263,8 +1398,12 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 		if (!(BIT(msi_idx) & msi_status))
 			continue;

+		/*
+		 * msi_idx contains bits [4:0] of the msi_data and msi_data
+		 * contains 16bit MSI interrupt number
+		 */
 		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
-		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
+		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
 		generic_handle_irq(msi_data);
 	}
@@ -1286,12 +1425,6 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
 	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
 	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

-	if (!isr0_status && !isr1_status) {
-		advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
-		advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
-		return;
-	}
-
 	/* Process MSI interrupts */
 	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
 		advk_pcie_handle_msi(pcie);
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -431,8 +431,21 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
 	/* Clear the W1C bits */
 	new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));

+	/* Save the new value with the cleared W1C bits into the cfgspace */
 	cfgspace[reg / 4] = cpu_to_le32(new);

+	/*
+	 * Clear the W1C bits not specified by the write mask, so that the
+	 * write_op() does not clear them.
+	 */
+	new &= ~(behavior[reg / 4].w1c & ~mask);
+
+	/*
+	 * Set the W1C bits specified by the write mask, so that write_op()
+	 * knows about that they are to be cleared.
+	 */
+	new |= (value << shift) & (behavior[reg / 4].w1c & mask);
+
 	if (write_op)
 		write_op(bridge, reg, old, new, mask);
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -504,6 +504,12 @@
 #define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
 #define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
 #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
 #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
 #define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
 #define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */