Commit 37853932 authored by Bjorn Helgaas

Merge branch 'pci/controller/dwc'

- Use msleep() in DWC core instead of usleep_range() for ~100 ms sleep
  (Konrad Dybcio)
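
  The concrete change in dw_pcie_wait_for_link() (excerpted from the diff
  below):

      - usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
      + msleep(LINK_WAIT_SLEEP_MS);      /* LINK_WAIT_SLEEP_MS is 90 */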

- Fix iATU slot management to avoid using the wrong slot after PERST#
  assert/deassert, which could potentially cause DMA to go to the wrong
  place (Frank Li)
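
  A sketch of the scheme used by the fix (based on the hunks below): the
  assigned iATU window is stored as index + 1 so that the value 0 can mean
  "no window assigned":

      /* allocation side: remember the slot as index + 1; 0 == unallocated */
      ep->bar_to_atu[bar] = free_win + 1;

      /* teardown side: skip BARs that never got a window, else recover the slot */
      if (!ep->bar_to_atu[bar])
              return;
      atu_index = ep->bar_to_atu[bar] - 1;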

- Consolidate dw_pcie_prog_outbound_atu() arguments into a struct to ease
  adding new functionality like initiating Message TLPs (Yoshihiro Shimoda)
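
  As a rough usage sketch (the index, addresses, and size below are made-up
  example values, not taken from this series), an outbound window is now
  described by filling a struct dw_pcie_ob_atu_cfg and passing it to
  dw_pcie_prog_outbound_atu():

      struct dw_pcie_ob_atu_cfg atu = { 0 };

      atu.index    = 0;                    /* iATU window to program */
      atu.type     = PCIE_ATU_TYPE_MEM;    /* MEM, IO, CFG0/CFG1 or MSG */
      atu.cpu_addr = 0x40000000;           /* example CPU (AXI) address */
      atu.pci_addr = 0x0;                  /* example PCI bus address */
      atu.size     = SZ_1M;                /* example window size */
      ret = dw_pcie_prog_outbound_atu(pci, &atu);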

- Add support for endpoints to initiate PCIe messages (Yoshihiro Shimoda)

- Add #defines for PCIe INTx messages (Yoshihiro Shimoda)

- Add generic support for the DWC host to send PCIe PME_Turn_Off messages
  during system suspend (Frank Li)
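
  The core of the generic path (sketched from dw_pcie_pme_turn_off() in the
  diff below, with error handling omitted) programs a MSG-type iATU window
  over a small reserved region and turns a dummy write into the broadcast
  Message:

      struct dw_pcie_ob_atu_cfg atu = { 0 };
      void __iomem *mem;

      atu.code     = PCIE_MSG_CODE_PME_TURN_OFF;
      atu.routing  = PCIE_MSG_TYPE_R_BC;      /* broadcast from the Root Complex */
      atu.type     = PCIE_ATU_TYPE_MSG;
      atu.index    = pci->pp.msg_atu_index;
      atu.cpu_addr = pci->pp.msg_res->start;
      atu.size     = resource_size(pci->pp.msg_res);
      dw_pcie_prog_outbound_atu(pci, &atu);

      mem = ioremap(atu.cpu_addr, pci->region_align);
      writel(0, mem);                         /* the dummy write becomes a Msg TLP */
      iounmap(mem);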

- Add dw_pcie_ep_linkdown() to reinitialize registers that are lost when
  the link goes down (Manivannan Sadhasivam)

- Use dw_pcie_ep_linkdown() to reinitialize qcom non-sticky registers that
  are lost when the link goes down (Manivannan Sadhasivam)
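
  A glue driver's Link Down interrupt handler then only needs to call the new
  helper, roughly as in the qcom-ep hunk below:

      if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
              dev_dbg(dev, "Received Linkdown event\n");
              pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
              dw_pcie_ep_linkdown(&pci->ep);  /* reinit non-sticky regs + notify EPF */
      }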

- Enforce the DWC limitation that 64-bit BARs must start with an
  even-numbered BAR (Niklas Cassel)
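
  The check added to dw_pcie_ep_set_bar() (see the hunk below) simply rejects
  a 64-bit BAR that starts on an odd BAR number:

      if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
              return -EINVAL;  /* e.g. BARs 1+2 cannot form a 64-bit BAR */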

* pci/controller/dwc:
  PCI: dwc: ep: Enforce DWC specific 64-bit BAR limitation
  PCI: layerscape-ep: Use the generic dw_pcie_ep_linkdown() API to handle Link Down event
  PCI: qcom-ep: Use the generic dw_pcie_ep_linkdown() API to handle Link Down event
  PCI: dwc: ep: Remove dw_pcie_ep_init_notify() wrapper
  PCI: dwc: ep: Add a generic dw_pcie_ep_linkdown() API to handle Link Down event
  PCI: dwc: Add generic MSG TLP support for sending PME_Turn_Off when system suspend
  PCI: Add PCIE_MSG_CODE_PME_TURN_OFF message macro
  PCI: Add PCIE_MSG_CODE_ASSERT_INTx message macros
  PCI: dwc: Add outbound MSG TLPs support
  PCI: dwc: Consolidate args of dw_pcie_prog_outbound_atu() into a structure
  PCI: dwc: Fix index 0 incorrectly being interpreted as a free ATU slot
  PCI: dwc: Use msleep() in dw_pcie_wait_for_link()
parents 35f0c94a 9b10e877
@@ -474,7 +474,7 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
 		return ret;
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	return 0;
 }
@@ -1126,7 +1126,7 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
 		return ret;
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	/* Start LTSSM. */
 	imx6_pcie_ltssm_enable(dev);
@@ -1293,7 +1293,7 @@ static int ks_pcie_probe(struct platform_device *pdev)
 			goto err_ep_init;
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:
@@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
 		dev_dbg(pci->dev, "Link up\n");
 	} else if (val & PEX_PF0_PME_MES_DR_LDD) {
 		dev_dbg(pci->dev, "Link down\n");
-		pci_epc_linkdown(pci->ep.epc);
+		dw_pcie_ep_linkdown(&pci->ep);
 	} else if (val & PEX_PF0_PME_MES_DR_HRD) {
 		dev_dbg(pci->dev, "Hot reset\n");
 	}
@@ -286,7 +286,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dw_pcie_ep_init_notify(&pci->ep);
+	pci_epc_init_notify(pci->ep.epc);
 
 	return ls_pcie_ep_interrupt_init(pcie, pdev);
 }
@@ -452,7 +452,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
 			return ret;
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:
@@ -15,30 +15,6 @@
 #include <linux/pci-epc.h>
 #include <linux/pci-epf.h>
 
-/**
- * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
- * @ep: DWC EP device
- */
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
-	struct pci_epc *epc = ep->epc;
-
-	pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-/**
- * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete
- * @ep: DWC EP device
- */
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
-	struct pci_epc *epc = ep->epc;
-
-	pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
 /**
  * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
  *				  the endpoint function
@@ -161,7 +137,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
 	if (!ep->bar_to_atu[bar])
 		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
 	else
-		free_win = ep->bar_to_atu[bar];
+		free_win = ep->bar_to_atu[bar] - 1;
 
 	if (free_win >= pci->num_ib_windows) {
 		dev_err(pci->dev, "No free inbound window\n");
@@ -175,15 +151,18 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
 		return ret;
 	}
 
-	ep->bar_to_atu[bar] = free_win;
+	/*
+	 * Always increment free_win before assignment, since value 0 is used to identify
+	 * unallocated mapping.
+	 */
+	ep->bar_to_atu[bar] = free_win + 1;
 	set_bit(free_win, ep->ib_window_map);
 
 	return 0;
 }
 
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
-				   phys_addr_t phys_addr,
-				   u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	u32 free_win;
@@ -195,13 +174,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 		return -EINVAL;
 	}
 
-	ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
-					   phys_addr, pci_addr, size);
+	atu->index = free_win;
+	ret = dw_pcie_prog_outbound_atu(pci, atu);
 	if (ret)
 		return ret;
 
 	set_bit(free_win, ep->ob_window_map);
-	ep->outbound_addr[free_win] = phys_addr;
+	ep->outbound_addr[free_win] = atu->cpu_addr;
 
 	return 0;
 }
@@ -212,7 +191,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	enum pci_barno bar = epf_bar->barno;
-	u32 atu_index = ep->bar_to_atu[bar];
+	u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+	if (!ep->bar_to_atu[bar])
+		return;
 
 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -233,6 +215,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	int ret, type;
 	u32 reg;
 
+	/*
+	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+	 * 1 and 2 to form a 64-bit BAR.
+	 */
+	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+		return -EINVAL;
+
 	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
 
 	if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -301,8 +290,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	int ret;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 
-	ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+	atu.func_no = func_no;
+	atu.type = PCIE_ATU_TYPE_MEM;
+	atu.cpu_addr = addr;
+	atu.pci_addr = pci_addr;
+	atu.size = size;
+	ret = dw_pcie_ep_outbound_atu(ep, &atu);
 	if (ret) {
 		dev_err(pci->dev, "Failed to enable address\n");
 		return ret;
@@ -673,6 +668,34 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
 	return 0;
 }
 
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+{
+	unsigned int offset;
+	unsigned int nbars;
+	u32 reg, i;
+
+	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	if (offset) {
+		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+			PCI_REBAR_CTRL_NBAR_SHIFT;
+
+		/*
+		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
+		 * size in the range from 1 MB to 512 GB. Advertise support
+		 * for 1 MB BAR size only.
+		 */
+		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+	}
+
+	dw_pcie_setup(pci);
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
 /**
  * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
  * @ep: DWC EP device
@@ -687,13 +710,11 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 	struct dw_pcie_ep_func *ep_func;
 	struct device *dev = pci->dev;
 	struct pci_epc *epc = ep->epc;
-	unsigned int offset, ptm_cap_base;
-	unsigned int nbars;
+	u32 ptm_cap_base, reg;
 	u8 hdr_type;
 	u8 func_no;
-	int i, ret;
 	void *addr;
-	u32 reg;
+	int ret;
 
 	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
 		   PCI_HEADER_TYPE_MASK;
@@ -756,25 +777,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 	if (ep->ops->init)
 		ep->ops->init(ep);
 
-	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
 	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
 
-	dw_pcie_dbi_ro_wr_en(pci);
-
-	if (offset) {
-		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
-		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
-			PCI_REBAR_CTRL_NBAR_SHIFT;
-
-		/*
-		 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
-		 * size in the range from 1 MB to 512 GB. Advertise support
-		 * for 1 MB BAR size only.
-		 */
-		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
-			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
-	}
-
 	/*
 	 * PTM responder capability can be disabled only after disabling
 	 * PTM root capability.
@@ -791,8 +795,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 		dw_pcie_dbi_ro_wr_dis(pci);
 	}
 
-	dw_pcie_setup(pci);
-	dw_pcie_dbi_ro_wr_dis(pci);
+	dw_pcie_ep_init_non_sticky_registers(pci);
 
 	return 0;
 
@@ -803,6 +806,43 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 }
 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
 
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since the registers need to be initialized
+ * before the link comes back again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+
+	/*
+	 * Initialize the non-sticky DWC registers as they would've reset post
+	 * Link Down. This is specifically needed for drivers not supporting
+	 * PERST# as they have no way to reinitialize the registers before the
+	 * link comes back again.
+	 */
+	dw_pcie_ep_init_non_sticky_registers(pci);
+
+	pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
 /**
  * dw_pcie_ep_init - Initialize the endpoint device
  * @ep: DWC EP device
@@ -398,6 +398,32 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
 	return 0;
 }
 
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct resource_entry *win;
+	struct resource *res;
+
+	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+	if (win) {
+		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+		if (!res)
+			return;
+
+		/*
+		 * Allocate MSG TLP region of size 'region_align' at the end of
+		 * the host bridge window.
+		 */
+		res->start = win->res->end - pci->region_align + 1;
+		res->end = win->res->end;
+		res->name = "msg";
+		res->flags = win->res->flags | IORESOURCE_BUSY;
+
+		if (!devm_request_resource(pci->dev, win->res, res))
+			pp->msg_res = res;
+	}
+}
+
 int dw_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -484,6 +510,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
 
 	dw_pcie_iatu_detect(pci);
 
+	/*
+	 * Allocate the resource for MSG TLP before programming the iATU
+	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+	 * on the value of 'region_align', this has to be done after
+	 * dw_pcie_iatu_detect().
+	 *
+	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+	 * make use of the generic MSG TLP implementation.
+	 */
+	if (pp->use_atu_msg)
+		dw_pcie_host_request_msg_tlp_res(pp);
+
 	ret = dw_pcie_edma_detect(pci);
 	if (ret)
 		goto err_free_msi;
@@ -554,6 +592,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int type, ret;
 	u32 busdev;
 
@@ -576,8 +615,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 	else
 		type = PCIE_ATU_TYPE_CFG1;
 
-	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
-					pp->cfg0_size);
+	atu.type = type;
+	atu.cpu_addr = pp->cfg0_base;
+	atu.pci_addr = busdev;
+	atu.size = pp->cfg0_size;
+
+	ret = dw_pcie_prog_outbound_atu(pci, &atu);
 	if (ret)
 		return NULL;
 
@@ -589,6 +632,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int ret;
 
 	ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -596,9 +640,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
 		return ret;
 
 	if (pp->cfg0_io_shared) {
-		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
-						pp->io_base, pp->io_bus_addr,
-						pp->io_size);
+		atu.type = PCIE_ATU_TYPE_IO;
+		atu.cpu_addr = pp->io_base;
+		atu.pci_addr = pp->io_bus_addr;
+		atu.size = pp->io_size;
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret)
 			return PCIBIOS_SET_FAILED;
 	}
@@ -611,6 +658,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
 {
 	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	int ret;
 
 	ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -618,9 +666,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
 		return ret;
 
 	if (pp->cfg0_io_shared) {
-		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
-						pp->io_base, pp->io_bus_addr,
-						pp->io_size);
+		atu.type = PCIE_ATU_TYPE_IO;
+		atu.cpu_addr = pp->io_base;
+		atu.pci_addr = pp->io_bus_addr;
+		atu.size = pp->io_size;
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret)
 			return PCIBIOS_SET_FAILED;
 	}
@@ -655,6 +706,7 @@ static struct pci_ops dw_pcie_ops = {
 static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
 	struct resource_entry *entry;
 	int i, ret;
 
@@ -682,10 +734,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 		if (pci->num_ob_windows <= ++i)
 			break;
 
-		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
-						entry->res->start,
-						entry->res->start - entry->offset,
-						resource_size(entry->res));
+		atu.index = i;
+		atu.type = PCIE_ATU_TYPE_MEM;
+		atu.cpu_addr = entry->res->start;
+		atu.pci_addr = entry->res->start - entry->offset;
+
+		/* Adjust iATU size if MSG TLP region was allocated before */
+		if (pp->msg_res && pp->msg_res->parent == entry->res)
+			atu.size = resource_size(entry->res) -
+					resource_size(pp->msg_res);
+		else
+			atu.size = resource_size(entry->res);
+
+		ret = dw_pcie_prog_outbound_atu(pci, &atu);
 		if (ret) {
 			dev_err(pci->dev, "Failed to set MEM range %pr\n",
 				entry->res);
@@ -695,10 +756,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 
 	if (pp->io_size) {
 		if (pci->num_ob_windows > ++i) {
-			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
-							pp->io_base,
-							pp->io_bus_addr,
-							pp->io_size);
+			atu.index = i;
+			atu.type = PCIE_ATU_TYPE_IO;
+			atu.cpu_addr = pp->io_base;
+			atu.pci_addr = pp->io_bus_addr;
+			atu.size = pp->io_size;
+
+			ret = dw_pcie_prog_outbound_atu(pci, &atu);
 			if (ret) {
 				dev_err(pci->dev, "Failed to set IO range %pr\n",
 					entry->res);
@@ -713,6 +777,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
 			 pci->num_ob_windows);
 
+	pp->msg_atu_index = i;
+
 	i = 0;
 	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
 		if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -818,11 +884,47 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
 }
 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
 
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+	struct dw_pcie_ob_atu_cfg atu = { 0 };
+	void __iomem *mem;
+	int ret;
+
+	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+		return -ENOSPC;
+
+	if (!pci->pp.msg_res)
+		return -ENOSPC;
+
+	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+	atu.routing = PCIE_MSG_TYPE_R_BC;
+	atu.type = PCIE_ATU_TYPE_MSG;
+	atu.size = resource_size(pci->pp.msg_res);
+	atu.index = pci->pp.msg_atu_index;
+
+	atu.cpu_addr = pci->pp.msg_res->start;
+
+	ret = dw_pcie_prog_outbound_atu(pci, &atu);
+	if (ret)
+		return ret;
+
+	mem = ioremap(atu.cpu_addr, pci->region_align);
+	if (!mem)
+		return -ENOMEM;
+
+	/* A dummy write is converted to a Msg TLP */
+	writel(0, mem);
+
+	iounmap(mem);
+
+	return 0;
+}
+
 int dw_pcie_suspend_noirq(struct dw_pcie *pci)
 {
 	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	u32 val;
-	int ret;
+	int ret = 0;
 
 	/*
 	 * If L1SS is supported, then do not put the link into L2 as some
@@ -834,10 +936,13 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
 	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
 		return 0;
 
-	if (!pci->pp.ops->pme_turn_off)
-		return 0;
+	if (pci->pp.ops->pme_turn_off)
+		pci->pp.ops->pme_turn_off(&pci->pp);
+	else
+		ret = dw_pcie_pme_turn_off(pci);
 
-	pci->pp.ops->pme_turn_off(&pci->pp);
+	if (ret)
+		return ret;
 
 	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
 				PCIE_PME_TO_L2_TIMEOUT_US/10,
@@ -154,7 +154,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 			dw_pcie_ep_deinit(&pci->ep);
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:
@@ -465,56 +465,61 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
 	return val | PCIE_ATU_TD;
 }
 
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
-				       int index, int type, u64 cpu_addr,
-				       u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+			      const struct dw_pcie_ob_atu_cfg *atu)
 {
+	u64 cpu_addr = atu->cpu_addr;
 	u32 retries, val;
 	u64 limit_addr;
 
 	if (pci->ops && pci->ops->cpu_addr_fixup)
 		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
 
-	limit_addr = cpu_addr + size - 1;
+	limit_addr = cpu_addr + atu->size - 1;
 
 	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
 	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
-	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+	    !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
 		return -EINVAL;
 	}
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
 			      lower_32_bits(cpu_addr));
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
 			      upper_32_bits(cpu_addr));
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
 			      lower_32_bits(limit_addr));
 	if (dw_pcie_ver_is_ge(pci, 460A))
-		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+		dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
 				      upper_32_bits(limit_addr));
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
-			      lower_32_bits(pci_addr));
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
-			      upper_32_bits(pci_addr));
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+			      lower_32_bits(atu->pci_addr));
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+			      upper_32_bits(atu->pci_addr));
 
-	val = type | PCIE_ATU_FUNC_NUM(func_no);
+	val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
 	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
 	    dw_pcie_ver_is_ge(pci, 460A))
 		val |= PCIE_ATU_INCREASE_REGION_SIZE;
 	if (dw_pcie_ver_is(pci, 490A))
 		val = dw_pcie_enable_ecrc(val);
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
 
-	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+	val = PCIE_ATU_ENABLE;
+	if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* The data-less messages only for now */
+		val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+	}
+	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
 
 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+		val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
 		if (val & PCIE_ATU_ENABLE)
 			return 0;
 
@@ -526,21 +531,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
 	return -ETIMEDOUT;
 }
 
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
-			      u64 cpu_addr, u64 pci_addr, u64 size)
-{
-	return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
-					   cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				 int type, u64 cpu_addr, u64 pci_addr,
-				 u64 size)
-{
-	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
-					   cpu_addr, pci_addr, size);
-}
-
 static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
 {
 	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -655,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
 		if (dw_pcie_link_up(pci))
 			break;
 
-		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+		msleep(LINK_WAIT_SLEEP_MS);
 	}
 
 	if (retries >= LINK_WAIT_MAX_RETRIES) {
@@ -63,8 +63,7 @@
 
 /* Parameters for the waiting for link up routine */
 #define LINK_WAIT_MAX_RETRIES		10
-#define LINK_WAIT_USLEEP_MIN		90000
-#define LINK_WAIT_USLEEP_MAX		100000
+#define LINK_WAIT_SLEEP_MS		90
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES	5
@@ -148,11 +147,13 @@
 #define PCIE_ATU_TYPE_IO		0x2
 #define PCIE_ATU_TYPE_CFG0		0x4
 #define PCIE_ATU_TYPE_CFG1		0x5
+#define PCIE_ATU_TYPE_MSG		0x10
 #define PCIE_ATU_TD			BIT(8)
 #define PCIE_ATU_FUNC_NUM(pf)		((pf) << 20)
 #define PCIE_ATU_REGION_CTRL2		0x004
 #define PCIE_ATU_ENABLE			BIT(31)
 #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)
+#define PCIE_ATU_INHIBIT_PAYLOAD	BIT(22)
 #define PCIE_ATU_FUNC_NUM_MATCH_EN	BIT(19)
 #define PCIE_ATU_LOWER_BASE		0x008
 #define PCIE_ATU_UPPER_BASE		0x00C
@@ -299,6 +300,17 @@ enum dw_pcie_ltssm {
 	DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
 };
 
+struct dw_pcie_ob_atu_cfg {
+	int index;
+	int type;
+	u8 func_no;
+	u8 code;
+	u8 routing;
+	u64 cpu_addr;
+	u64 pci_addr;
+	u64 size;
+};
+
 struct dw_pcie_host_ops {
 	int (*init)(struct dw_pcie_rp *pp);
 	void (*deinit)(struct dw_pcie_rp *pp);
@@ -328,6 +340,9 @@ struct dw_pcie_rp {
 	struct pci_host_bridge  *bridge;
 	raw_spinlock_t		lock;
 	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+	bool			use_atu_msg;
+	int			msg_atu_index;
+	struct resource		*msg_res;
 };
 
 struct dw_pcie_ep_ops {
@@ -433,10 +448,8 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
 int dw_pcie_link_up(struct dw_pcie *pci);
 void dw_pcie_upconfig_setup(struct dw_pcie *pci);
 int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
-			      u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				 int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+			      const struct dw_pcie_ob_atu_cfg *atu);
 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
			      u64 cpu_addr, u64 pci_addr, u64 size);
 int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
@@ -668,9 +681,9 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
 #ifdef CONFIG_PCIE_DW_EP
 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
 void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
 void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
@@ -688,18 +701,18 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
 {
 }
 
-static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
 {
-	return 0;
 }
 
-static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	return 0;
 }
 
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
 {
+	return 0;
 }
 
 static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
@@ -442,7 +442,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
 			return ret;
 		}
 
-		dw_pcie_ep_init_notify(&pci->ep);
+		pci_epc_init_notify(pci->ep.epc);
 
 		break;
 	default:
@@ -482,7 +482,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
 	val &= ~PARF_MSTR_AXI_CLK_EN;
 	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
 
-	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+	pci_epc_init_notify(pcie_ep->pci.ep.epc);
 
 	/* Enable LTSSM */
 	val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
@@ -641,7 +641,7 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
 	if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
 		dev_dbg(dev, "Received Linkdown event\n");
 		pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
-		pci_epc_linkdown(pci->ep.epc);
+		dw_pcie_ep_linkdown(&pci->ep);
 	} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
 		dev_dbg(dev, "Received Bus Master Enable event\n");
 		pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
@@ -437,7 +437,7 @@ static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
 		rcar_gen4_pcie_ep_deinit(rcar);
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	return ret;
 }
@@ -1902,7 +1902,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
 		goto fail_init_complete;
 	}
 
-	dw_pcie_ep_init_notify(ep);
+	pci_epc_init_notify(ep->epc);
 
 	/* Program the private control to allow sending LTR upstream */
 	if (pcie->of_data->has_ltr_req_fix) {
@@ -410,7 +410,7 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dw_pcie_ep_init_notify(&priv->pci.ep);
+	pci_epc_init_notify(priv->pci.ep.epc);
 
 	return 0;
 }
@@ -22,6 +22,27 @@
  */
 #define PCIE_PME_TO_L2_TIMEOUT_US	10000
 
+/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
+#define PCIE_MSG_TYPE_R_RC	0
+#define PCIE_MSG_TYPE_R_ADDR	1
+#define PCIE_MSG_TYPE_R_ID	2
+#define PCIE_MSG_TYPE_R_BC	3
+#define PCIE_MSG_TYPE_R_LOCAL	4
+#define PCIE_MSG_TYPE_R_GATHER	5
+
+/* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */
+#define PCIE_MSG_CODE_PME_TURN_OFF	0x19
+
+/* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */
+#define PCIE_MSG_CODE_ASSERT_INTA	0x20
+#define PCIE_MSG_CODE_ASSERT_INTB	0x21
+#define PCIE_MSG_CODE_ASSERT_INTC	0x22
+#define PCIE_MSG_CODE_ASSERT_INTD	0x23
+#define PCIE_MSG_CODE_DEASSERT_INTA	0x24
+#define PCIE_MSG_CODE_DEASSERT_INTB	0x25
+#define PCIE_MSG_CODE_DEASSERT_INTC	0x26
+#define PCIE_MSG_CODE_DEASSERT_INTD	0x27
+
 extern const unsigned char pcie_link_speed[];
 extern bool pci_early_dump;