Commit bd22885a authored by Tom Joseph, committed by Lorenzo Pieralisi

PCI: cadence: Refactor driver to use as a core library

Cadence PCIe host and endpoint IP may be embedded into a variety of
SoCs/platforms. Let's extract the platform-related APIs/structures in the
current driver into a separate file (pcie-cadence-plat.c), so that the
common functionality can be used by future platforms.
Signed-off-by: Tom Joseph <tjoseph@cadence.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
parent 54ecb8f7
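
For orientation before the diff: after this refactor the host and endpoint drivers act as a core library exporting cdns_pcie_host_setup() and cdns_pcie_ep_setup(). An SoC-specific glue driver only has to embed struct cdns_pcie_rc (or struct cdns_pcie_ep), fill in the new pcie.dev pointer and call the setup routine; the new pcie-cadence-plat.c added below does exactly that for the generic "cdns,cdns-pcie-host"/"cdns,cdns-pcie-ep" bindings. The following is a minimal sketch of how a future host-mode glue driver might consume the library; the "acme" names and compatible string are hypothetical placeholders, not part of this commit, and SoC-specific clock/reset/PHY handling is omitted.

/* Hypothetical host-mode glue driver for an imaginary "acme" SoC (sketch only). */
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"	/* struct cdns_pcie_rc, cdns_pcie_host_setup() */

static int acme_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct cdns_pcie_rc *rc;

	/* The core expects the RC private data to live in the host bridge priv. */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;

	rc = pci_host_bridge_priv(bridge);
	rc->pcie.dev = dev;	/* ->dev is the field this commit adds to struct cdns_pcie */

	/* SoC-specific clock/reset/PHY setup would go here. */

	/*
	 * Hand over to the common Cadence host code; the named platform
	 * resources (e.g. "mem") are looked up from the platform device by
	 * the core via to_platform_device(dev).
	 */
	return cdns_pcie_host_setup(rc);
}

static const struct of_device_id acme_pcie_of_match[] = {
	{ .compatible = "acme,acme-pcie-host" },	/* hypothetical binding */
	{ },
};

static struct platform_driver acme_pcie_driver = {
	.driver = {
		.name = "acme-pcie",
		.of_match_table = acme_pcie_of_match,
	},
	.probe = acme_pcie_probe,
};
builtin_platform_driver(acme_pcie_driver);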
Kconfig:
@@ -28,23 +28,38 @@ config PCIE_CADENCE
 	bool
 
 config PCIE_CADENCE_HOST
-	bool "Cadence PCIe host controller"
+	bool
 	depends on OF
-	depends on PCI
 	select IRQ_DOMAIN
 	select PCIE_CADENCE
-	help
-	  Say Y here if you want to support the Cadence PCIe controller in host
-	  mode. This PCIe controller may be embedded into many different vendors
-	  SoCs.
 
 config PCIE_CADENCE_EP
-	bool "Cadence PCIe endpoint controller"
+	bool
 	depends on OF
 	depends on PCI_ENDPOINT
 	select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+	bool
+
+config PCIE_CADENCE_PLAT_HOST
+	bool "Cadence PCIe platform host controller"
+	depends on OF
+	select PCIE_CADENCE_HOST
+	select PCIE_CADENCE_PLAT
+	help
+	  Say Y here if you want to support the Cadence PCIe platform controller in
+	  host mode. This PCIe controller may be embedded into many different
+	  vendors SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+	bool "Cadence PCIe platform endpoint controller"
+	depends on OF
+	depends on PCI_ENDPOINT
+	select PCIE_CADENCE_EP
+	select PCIE_CADENCE_PLAT
 	help
-	  Say Y here if you want to support the Cadence PCIe controller in
+	  Say Y here if you want to support the Cadence PCIe platform controller in
 	  endpoint mode. This PCIe controller may be embedded into many
 	  different vendors SoCs.
Makefile:
@@ -2,6 +2,7 @@
 obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
 obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
 obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
 obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
 obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
 obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
pcie-cadence-ep.c:
@@ -17,35 +17,6 @@
 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3
 
-/**
- * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
- * @pcie: Cadence PCIe controller
- * @max_regions: maximum number of regions supported by hardware
- * @ob_region_map: bitmask of mapped outbound regions
- * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
- *		   dedicated outbound regions is mapped.
- * @irq_cpu_addr: base address in the CPU space where a write access triggers
- *		  the sending of a memory write (MSI) / normal message (legacy
- *		  IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
- *		  dedicated outbound region.
- * @irq_pci_fn: the latest PCI function that has updated the mapping of
- *		the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
- */
-struct cdns_pcie_ep {
-	struct cdns_pcie pcie;
-	u32 max_regions;
-	unsigned long ob_region_map;
-	phys_addr_t *ob_addr;
-	phys_addr_t irq_phys_addr;
-	void __iomem *irq_cpu_addr;
-	u64 irq_pci_addr;
-	u8 irq_pci_fn;
-	u8 irq_pending;
-};
-
 static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
 				     struct pci_epf_header *hdr)
 {
@@ -424,28 +395,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
 	.get_features = cdns_pcie_ep_get_features,
 };
 
-static const struct of_device_id cdns_pcie_ep_of_match[] = {
-	{ .compatible = "cdns,cdns-pcie-ep" },
-	{ },
-};
-
-static int cdns_pcie_ep_probe(struct platform_device *pdev)
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 {
-	struct device *dev = &pdev->dev;
+	struct device *dev = ep->pcie.dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct device_node *np = dev->of_node;
-	struct cdns_pcie_ep *ep;
-	struct cdns_pcie *pcie;
-	struct pci_epc *epc;
+	struct cdns_pcie *pcie = &ep->pcie;
 	struct resource *res;
+	struct pci_epc *epc;
 	int ret;
-	int phy_count;
-
-	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
-	if (!ep)
-		return -ENOMEM;
-
-	pcie = &ep->pcie;
 	pcie->is_rc = false;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
@@ -474,19 +434,6 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
 	if (!ep->ob_addr)
 		return -ENOMEM;
 
-	ret = cdns_pcie_init_phy(dev, pcie);
-	if (ret) {
-		dev_err(dev, "failed to init phy\n");
-		return ret;
-	}
-	platform_set_drvdata(pdev, pcie);
-
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		dev_err(dev, "pm_runtime_get_sync() failed\n");
-		goto err_get_sync;
-	}
-
 	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
@@ -528,38 +475,5 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
  err_init:
 	pm_runtime_put_sync(dev);
 
- err_get_sync:
-	pm_runtime_disable(dev);
-	cdns_pcie_disable_phy(pcie);
-	phy_count = pcie->phy_count;
-	while (phy_count--)
-		device_link_del(pcie->link[phy_count]);
-
 	return ret;
 }
-
-static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct cdns_pcie *pcie = dev_get_drvdata(dev);
-	int ret;
-
-	ret = pm_runtime_put_sync(dev);
-	if (ret < 0)
-		dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
-	pm_runtime_disable(dev);
-
-	cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_ep_driver = {
-	.driver = {
-		.name = "cdns-pcie-ep",
-		.of_match_table = cdns_pcie_ep_of_match,
-		.pm = &cdns_pcie_pm_ops,
-	},
-	.probe = cdns_pcie_ep_probe,
-	.shutdown = cdns_pcie_ep_shutdown,
-};
-builtin_platform_driver(cdns_pcie_ep_driver);
pcie-cadence-host.c:
@@ -11,33 +11,6 @@
 
 #include "pcie-cadence.h"
 
-/**
- * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
- * @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
- * @cfg_res: start/end offsets in the physical system memory to map PCI
- *	     configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
- * @cfg_base: IO mapped window to access the PCI configuration space of a
- *	      single function at a time
- * @max_regions: maximum number of regions supported by the hardware
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- *		  translation (nbits sets into the "no BAR match" register)
- * @vendor_id: PCI vendor ID
- * @device_id: PCI device ID
- */
-struct cdns_pcie_rc {
-	struct cdns_pcie pcie;
-	struct device *dev;
-	struct resource *cfg_res;
-	struct resource *bus_range;
-	void __iomem *cfg_base;
-	u32 max_regions;
-	u32 no_bar_nbits;
-	u16 vendor_id;
-	u16 device_id;
-};
-
 static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
 				      int where)
 {
@@ -92,11 +65,6 @@ static struct pci_ops cdns_pcie_host_ops = {
 	.write = pci_generic_config_write,
 };
 
-static const struct of_device_id cdns_pcie_host_of_match[] = {
-	{ .compatible = "cdns,cdns-pcie-host" },
-	{ },
-};
-
 static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 {
@@ -136,10 +104,10 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 {
 	struct cdns_pcie *pcie = &rc->pcie;
-	struct resource *cfg_res = rc->cfg_res;
 	struct resource *mem_res = pcie->mem_res;
 	struct resource *bus_range = rc->bus_range;
-	struct device *dev = rc->dev;
+	struct resource *cfg_res = rc->cfg_res;
+	struct device *dev = pcie->dev;
 	struct device_node *np = dev->of_node;
 	struct of_pci_range_parser parser;
 	struct of_pci_range range;
@@ -233,25 +201,21 @@ static int cdns_pcie_host_init(struct device *dev,
 	return err;
 }
 
-static int cdns_pcie_host_probe(struct platform_device *pdev)
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 {
-	struct device *dev = &pdev->dev;
+	struct device *dev = rc->pcie.dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct device_node *np = dev->of_node;
 	struct pci_host_bridge *bridge;
 	struct list_head resources;
-	struct cdns_pcie_rc *rc;
 	struct cdns_pcie *pcie;
 	struct resource *res;
 	int ret;
-	int phy_count;
 
-	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+	bridge = pci_host_bridge_from_priv(rc);
 	if (!bridge)
 		return -ENOMEM;
 
-	rc = pci_host_bridge_priv(bridge);
-	rc->dev = dev;
-
 	pcie = &rc->pcie;
 	pcie->is_rc = true;
@@ -287,21 +251,8 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
 		dev_err(dev, "missing \"mem\"\n");
 		return -EINVAL;
 	}
-	pcie->mem_res = res;
 
-	ret = cdns_pcie_init_phy(dev, pcie);
-	if (ret) {
-		dev_err(dev, "failed to init phy\n");
-		return ret;
-	}
-	platform_set_drvdata(pdev, pcie);
-
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		dev_err(dev, "pm_runtime_get_sync() failed\n");
-		goto err_get_sync;
-	}
+	pcie->mem_res = res;
 
 	ret = cdns_pcie_host_init(dev, &resources, rc);
 	if (ret)
@@ -326,37 +277,5 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
  err_init:
 	pm_runtime_put_sync(dev);
 
- err_get_sync:
-	pm_runtime_disable(dev);
-	cdns_pcie_disable_phy(pcie);
-	phy_count = pcie->phy_count;
-	while (phy_count--)
-		device_link_del(pcie->link[phy_count]);
-
 	return ret;
 }
-
-static void cdns_pcie_shutdown(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct cdns_pcie *pcie = dev_get_drvdata(dev);
-	int ret;
-
-	ret = pm_runtime_put_sync(dev);
-	if (ret < 0)
-		dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
-	pm_runtime_disable(dev);
-
-	cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_host_driver = {
-	.driver = {
-		.name = "cdns-pcie-host",
-		.of_match_table = cdns_pcie_host_of_match,
-		.pm = &cdns_pcie_pm_ops,
-	},
-	.probe = cdns_pcie_host_probe,
-	.shutdown = cdns_pcie_shutdown,
-};
-builtin_platform_driver(cdns_pcie_host_driver);
pcie-cadence-plat.c (new file):
// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence PCIe platform driver.
 *
 * Copyright (c) 2019, Cadence Design Systems
 * Author: Tom Joseph <tjoseph@cadence.com>
 */
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include "pcie-cadence.h"

/**
 * struct cdns_plat_pcie - private data for this PCIe platform driver
 * @pcie: Cadence PCIe controller
 * @is_rc: Set to 1 indicates the PCIe controller mode is Root Complex,
 *         if 0 it is in Endpoint mode.
 */
struct cdns_plat_pcie {
	struct cdns_pcie *pcie;
	bool is_rc;
};

struct cdns_plat_pcie_of_data {
	bool is_rc;
};

static const struct of_device_id cdns_plat_pcie_of_match[];

static int cdns_plat_pcie_probe(struct platform_device *pdev)
{
	const struct cdns_plat_pcie_of_data *data;
	struct cdns_plat_pcie *cdns_plat_pcie;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct cdns_pcie_ep *ep;
	struct cdns_pcie_rc *rc;
	int phy_count;
	bool is_rc;
	int ret;

	match = of_match_device(cdns_plat_pcie_of_match, dev);
	if (!match)
		return -EINVAL;

	data = (struct cdns_plat_pcie_of_data *)match->data;
	is_rc = data->is_rc;

	pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
	cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
	if (!cdns_plat_pcie)
		return -ENOMEM;

	platform_set_drvdata(pdev, cdns_plat_pcie);
	if (is_rc) {
		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
			return -ENODEV;

		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
		if (!bridge)
			return -ENOMEM;

		rc = pci_host_bridge_priv(bridge);
		rc->pcie.dev = dev;
		cdns_plat_pcie->pcie = &rc->pcie;
		cdns_plat_pcie->is_rc = is_rc;

		ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
		if (ret) {
			dev_err(dev, "failed to init phy\n");
			return ret;
		}
		pm_runtime_enable(dev);
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			dev_err(dev, "pm_runtime_get_sync() failed\n");
			goto err_get_sync;
		}

		ret = cdns_pcie_host_setup(rc);
		if (ret)
			goto err_init;
	} else {
		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
			return -ENODEV;

		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
		if (!ep)
			return -ENOMEM;

		ep->pcie.dev = dev;
		cdns_plat_pcie->pcie = &ep->pcie;
		cdns_plat_pcie->is_rc = is_rc;

		ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
		if (ret) {
			dev_err(dev, "failed to init phy\n");
			return ret;
		}

		pm_runtime_enable(dev);
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			dev_err(dev, "pm_runtime_get_sync() failed\n");
			goto err_get_sync;
		}

		ret = cdns_pcie_ep_setup(ep);
		if (ret)
			goto err_init;
	}

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
	phy_count = cdns_plat_pcie->pcie->phy_count;
	while (phy_count--)
		device_link_del(cdns_plat_pcie->pcie->link[phy_count]);

	return 0;
}

static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);

	cdns_pcie_disable_phy(pcie);
}

static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
	.is_rc = true,
};

static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
	.is_rc = false,
};

static const struct of_device_id cdns_plat_pcie_of_match[] = {
	{
		.compatible = "cdns,cdns-pcie-host",
		.data = &cdns_plat_pcie_host_of_data,
	},
	{
		.compatible = "cdns,cdns-pcie-ep",
		.data = &cdns_plat_pcie_ep_of_data,
	},
	{},
};

static struct platform_driver cdns_plat_pcie_driver = {
	.driver = {
		.name = "cdns-pcie",
		.of_match_table = cdns_plat_pcie_of_match,
		.pm = &cdns_pcie_pm_ops,
	},
	.probe = cdns_plat_pcie_probe,
	.shutdown = cdns_plat_pcie_shutdown,
};
builtin_platform_driver(cdns_plat_pcie_driver);
pcie-cadence.h:
@@ -190,6 +190,8 @@ enum cdns_pcie_rp_bar {
 	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
 #define CDNS_PCIE_MSG_NO_DATA BIT(16)
 
+struct cdns_pcie;
+
 enum cdns_pcie_msg_code {
 	MSG_CODE_ASSERT_INTA = 0x20,
 	MSG_CODE_ASSERT_INTB = 0x21,
@@ -231,13 +233,71 @@ enum cdns_pcie_msg_routing {
 struct cdns_pcie {
 	void __iomem *reg_base;
 	struct resource *mem_res;
+	struct device *dev;
 	bool is_rc;
 	u8 bus;
 	int phy_count;
 	struct phy **phy;
 	struct device_link **link;
+	const struct cdns_pcie_common_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @dev: pointer to PCIe device
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ *	     configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ *	      single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ *		  translation (nbits sets into the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+	struct cdns_pcie pcie;
+	struct resource *cfg_res;
+	struct resource *bus_range;
+	void __iomem *cfg_base;
+	u32 max_regions;
+	u32 no_bar_nbits;
+	u16 vendor_id;
+	u16 device_id;
 };
 
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound regions is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ *		  the sending of a memory write (MSI) / normal message (legacy
+ *		  IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ *		  dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ *		the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+	struct cdns_pcie pcie;
+	u32 max_regions;
+	unsigned long ob_region_map;
+	phys_addr_t *ob_addr;
+	phys_addr_t irq_phys_addr;
+	void __iomem *irq_cpu_addr;
+	u64 irq_pci_addr;
+	u8 irq_pci_fn;
+	u8 irq_pending;
+};
+
 /* Register access */
 static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
 {
@@ -306,6 +366,23 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
 	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
 }
 
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+	return 0;
+}
+#endif
+
 void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
 				   u32 r, bool is_io,
 				   u64 cpu_addr, u64 pci_addr, size_t size);
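
A closing note on the header changes above: the #ifdef CONFIG_PCIE_CADENCE_HOST / CONFIG_PCIE_CADENCE_EP stubs let glue code call both setup functions unconditionally and still build when only one of the two roles is enabled. As a counterpart to the host-mode sketch near the top of this page, the following is roughly what an endpoint-mode glue driver could look like; again, the "acme" names and compatible string are hypothetical placeholders, not part of this commit, and SoC-specific setup is omitted.

/* Hypothetical endpoint-mode glue driver for the same imaginary "acme" SoC (sketch only). */
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"	/* struct cdns_pcie_ep, cdns_pcie_ep_setup() */

static int acme_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie_ep *ep;

	/* No host bridge in endpoint mode; the EP state is allocated directly. */
	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->pcie.dev = dev;

	/* SoC-specific clock/reset/PHY setup would go here. */

	/* Common Cadence endpoint initialisation (maps the "reg" resource and
	 * registers the endpoint controller with the PCI endpoint framework). */
	return cdns_pcie_ep_setup(ep);
}

static const struct of_device_id acme_pcie_ep_of_match[] = {
	{ .compatible = "acme,acme-pcie-ep" },	/* hypothetical binding */
	{ },
};

static struct platform_driver acme_pcie_ep_driver = {
	.driver = {
		.name = "acme-pcie-ep",
		.of_match_table = acme_pcie_ep_of_match,
	},
	.probe = acme_pcie_ep_probe,
};
builtin_platform_driver(acme_pcie_ep_driver);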