Commit dd5bba52 authored by Bjorn Helgaas

Merge branches 'pci/host-aardvark', 'pci/host-altera', 'pci/host-iproc', 'pci/host-mvebu', 'pci/host-rcar', 'pci/host-rockchip', 'pci/host-tegra', 'pci/host-xgene' and 'pci/host-xilinx' into next

* pci/host-aardvark:
  MAINTAINERS: Add DT binding to the Aardvark PCIe driver maintainer
  PCI: aardvark: Remove unused platform data
  PCI: aardvark: Add local struct device pointers

* pci/host-altera:
  PCI: altera: Simplify TLP_CFG_DW1 usage
  PCI: altera: Simplify TLB_CFG_DW0 usage
  PCI: altera: Rename altera_pcie_valid_config() to altera_pcie_valid_device()
  PCI: altera: Remove redundant platform_get_resource() return value check
  PCI: altera: Remove unused platform data
  PCI: altera: Add local struct device pointers
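
  The two "Simplify TLP_CFG_DW*" patches fold the root-bus check and the
  requester-ID construction into the macros themselves, so the config-access
  helpers no longer open-code them. The call-site effect (taken from the read
  path in the diff below) is roughly:

    /* before: caller selects the TLP format/type and builds the requester ID */
    if (bus == pcie->root_bus_nr)
            headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
    else
            headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
    headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
                             TLP_READ_TAG, byte_en);

    /* after: the macros take the pcie/bus context and derive both internally */
    headers[0] = TLP_CFG_DW0(pcie, bus);
    headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);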

* pci/host-iproc:
  PCI: iproc: Hard-code PCIe capability offset instead of searching
  PCI: iproc: Remove redundant null pointer checking
  PCI: iproc: Validate CSR base in BCMA setup code
  PCI: iproc: Set drvdata at end of probe function
  PCI: iproc: Add local struct device pointers

* pci/host-mvebu:
  PCI: mvebu: Use existing of_node pointer
  PCI: mvebu: Add local struct device pointers

* pci/host-rcar:
  PCI: rcar-gen2: Add local struct device pointers
  PCI: rcar: Remove DRV_NAME macro
  PCI: rcar: Remove unused rcar_pcie_get_resources() platform_device arg
  PCI: rcar: Remove unused platform data
  PCI: rcar: Add local struct device pointers

* pci/host-rockchip:
  PCI: rockchip: Indent "if" statement body
  PCI: rockchip: Remove unused platform data

* pci/host-tegra:
  PCI: tegra: Remove unused platform data
  PCI: tegra: Add local struct device pointers
  PCI: tegra: Fix argument order in tegra_pcie_phy_disable()

* pci/host-xgene:
  PCI: xgene: Add register accessors
  PCI: xgene: Pass struct xgene_pcie_port to setup functions
  PCI: xgene: Remove unused platform data
  PCI: xgene: Add local struct device pointers
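
  The "Add register accessors" patch wraps CSR accesses in xgene_pcie_readl()
  and xgene_pcie_writel() so later patches can pass struct xgene_pcie_port
  around instead of a raw void __iomem * base. A rough sketch of the call-site
  change (the accessor definitions themselves are in the diff below):

    /* before: callers dereference the mapped CSR region directly */
    val = readl(port->csr_base + PCIECORE_CTLANDSTATUS);
    writel(rtdid_val, port->csr_base + RTDID);

    /* after: thin wrappers take the port and keep the base pointer private */
    val = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
    xgene_pcie_writel(port, RTDID, rtdid_val);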

* pci/host-xilinx:
  PCI: xilinx-nwl: Remove unused platform data
  PCI: xilinx-nwl: Add local struct device pointers
  PCI: xilinx: Removed unused xilinx_pcie_assign_msi() argument
  PCI: xilinx: Remove unused platform data
  PCI: xilinx: Add local struct device pointers
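
  The "Add local struct device pointers" patches repeated across these branches
  all apply the same cleanup: cache the struct device pointer once at the top of
  a function and hand it to the devm_*/dev_* helpers instead of repeating
  &pdev->dev or pcie->pdev->dev at every call site. A minimal sketch of the
  pattern (foo_pcie and the probe body are placeholders, not code from any one
  of the drivers above):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_pcie {
            void __iomem *base;     /* placeholder driver state */
    };

    static int foo_pcie_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;        /* local pointer, added once */
            struct foo_pcie *pcie;
            struct resource *res;

            pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);    /* was devm_kzalloc(&pdev->dev, ...) */
            if (!pcie)
                    return -ENOMEM;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            pcie->base = devm_ioremap_resource(dev, res);           /* was devm_ioremap_resource(&pdev->dev, res) */
            if (IS_ERR(pcie->base))
                    return PTR_ERR(pcie->base);

            dev_info(dev, "mapped registers at %pR\n", res);        /* was dev_info(&pdev->dev, ...) */
            return 0;
    }
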
......@@ -8982,6 +8982,7 @@ M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/host/pci-aardvark.c
PCI DRIVER FOR NVIDIA TEGRA
......
......@@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
dev_info(&pcie->pdev->dev, "link up\n");
dev_info(dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
dev_err(&pcie->pdev->dev, "link never came up\n");
dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
......@@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
......@@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
else
str_posted = "Posted";
dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
......@@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return 0;
}
dev_err(&pcie->pdev->dev, "config read/write timed out\n");
dev_err(dev, "config read/write timed out\n");
return -ETIMEDOUT;
}
......@@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
{
struct device *dev = &pcie->pdev->dev;
mutex_lock(&pcie->msi_used_lock);
if (!test_bit(hwirq, pcie->msi_irq_in_use))
dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
hwirq);
dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
else
clear_bit(hwirq, pcie->msi_irq_in_use);
mutex_unlock(&pcie->msi_used_lock);
......@@ -910,6 +913,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
......@@ -917,31 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device_node *msi_node;
int ret, irq;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pcie->base = devm_ioremap_resource(&pdev->dev, res);
pcie->base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to register interrupt\n");
dev_err(dev, "Failed to register interrupt\n");
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to parse resources\n");
dev_err(dev, "Failed to parse resources\n");
return ret;
}
......@@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
return ret;
}
ret = advk_pcie_init_msi_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
advk_pcie_remove_irq_domain(pcie);
return ret;
}
msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
if (msi_node)
msi = of_pci_find_msi_chip_by_node(msi_node);
else
msi = NULL;
bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
pcie, &pcie->resources, &pcie->msi);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
......@@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
}
......
......@@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
......@@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(&pdev->dev, "invalid memory aperture size\n");
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
......@@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
if (ret) {
dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
ret);
dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
num = of_get_available_child_count(pdev->dev.of_node);
num = of_get_available_child_count(np);
pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
GFP_KERNEL);
pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
for_each_available_child_of_node(pdev->dev.of_node, child) {
for_each_available_child_of_node(np, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
ret = mvebu_pcie_parse_port(pcie, port, child);
......@@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "%s: cannot map registers\n",
port->name);
dev_err(dev, "%s: cannot map registers\n", port->name);
port->base = NULL;
mvebu_pcie_powerdown(port);
continue;
......
......@@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
dev_err(priv->dev, "error irq: status %08x\n", status);
dev_err(dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
......@@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
struct device *dev = priv->dev;
int ret;
u32 val;
ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
dev_err(priv->dev, "cannot claim IRQ for error handling\n");
dev_err(dev, "cannot claim IRQ for error handling\n");
return;
}
......@@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
struct device *dev = priv->dev;
void __iomem *reg = priv->reg;
u32 val;
int ret;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
......@@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->mem_res);
ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
ret = devm_request_pci_bus_resources(dev, &sys->resources);
if (ret < 0)
return ret;
......@@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
struct device *dev = pci->dev;
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
......@@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
/* Catch HW limitations */
if (!(range.flags & IORESOURCE_PREFETCH)) {
dev_err(pci->dev, "window must be prefetchable\n");
dev_err(dev, "window must be prefetchable\n");
return -EINVAL;
}
if (pci->window_addr) {
u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
if (lowaddr < pci->window_size) {
dev_err(pci->dev, "invalid window size/addr\n");
dev_err(dev, "invalid window size/addr\n");
return -EINVAL;
}
}
......@@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
static int rcar_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
......@@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
reg = devm_ioremap_resource(dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
......@@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
if (mem_res->start & 0xFFFF)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct rcar_pci_priv), GFP_KERNEL);
priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
......@@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
priv->dev = &pdev->dev;
priv->dev = dev;
if (priv->irq < 0) {
dev_err(&pdev->dev, "no valid irq found\n");
dev_err(dev, "no valid irq found\n");
return priv->irq;
}
......@@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_pci = 0x40000000;
priv->window_size = SZ_1G;
if (pdev->dev.of_node) {
if (dev->of_node) {
struct resource busnr;
int ret;
ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
ret = of_pci_parse_bus_range(dev->of_node, &busnr);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse bus-range\n");
dev_err(dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
dev_warn(&pdev->dev, "only one bus number supported\n");
dev_warn(dev, "only one bus number supported\n");
ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse dma-range\n");
dev_err(dev, "failed to parse dma-range\n");
return ret;
}
} else {
......@@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
pci_common_init_dev(&pdev->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}
......
......@@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
......@@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
err);
dev_err(dev, "ioremap_page_range() failed: %d\n", err);
goto unmap;
}
}
......@@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
if (bus->number == 0) {
......@@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
addr = (void __iomem *)b->area->addr;
if (!addr) {
dev_err(pcie->dev,
"failed to map cfg. space for bus %u\n",
dev_err(dev, "failed to map cfg. space for bus %u\n",
bus->number);
return NULL;
}
......@@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
struct tegra_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
devm_iounmap(pcie->dev, port->base);
devm_release_mem_region(pcie->dev, port->regs.start,
devm_iounmap(dev, port->base);
devm_release_mem_region(dev, port->regs.start,
resource_size(&port->regs));
list_del(&port->list);
devm_kfree(pcie->dev, port);
devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
......@@ -612,12 +613,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
struct device *dev = pcie->dev;
int err;
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
err = devm_request_resource(dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
......@@ -631,7 +633,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
err = devm_request_pci_bus_resources(dev, &sys->resources);
if (err < 0)
return err;
......@@ -672,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
struct device *dev = pcie->dev;
u32 code, signature;
code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
......@@ -689,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
code == AFI_INTR_FPCI_DECODE_ERROR) {
......@@ -701,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
dev_dbg(dev, " FPCI address: %10llx\n", address);
else
dev_err(pcie->dev, " FPCI address: %10llx\n", address);
dev_err(dev, " FPCI address: %10llx\n", address);
}
return IRQ_HANDLED;
......@@ -793,6 +794,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
......@@ -829,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
dev_err(dev, "PLL failed to lock: %d\n", err);
return err;
}
......@@ -859,7 +861,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
pads_writel(pcie, PADS_CTL, value);
pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
......@@ -880,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
for (i = 0; i < port->lanes; i++) {
err = phy_power_on(port->phys[i]);
if (err < 0) {
dev_err(dev, "failed to power on PHY#%u: %d\n", i,
err);
dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
return err;
}
}
......@@ -909,6 +910,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
......@@ -920,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
err = tegra_pcie_phy_enable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
dev_err(dev, "failed to power on PHY: %d\n", err);
return err;
}
......@@ -928,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_on(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power on PCIe port %u PHY: %d\n",
port->index, err);
return err;
......@@ -946,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port;
int err;
......@@ -956,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
err = tegra_pcie_phy_disable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY: %d\n",
err);
dev_err(dev, "failed to power off PHY: %d\n", err);
return err;
}
......@@ -965,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_off(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power off PCIe port %u PHY: %d\n",
port->index, err);
return err;
......@@ -977,6 +979,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
......@@ -1016,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
dev_err(dev, "failed to power on PHY(s): %d\n", err);
return err;
}
......@@ -1049,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
/* TODO: disable and unprepare clocks? */
err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
dev_err(dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
......@@ -1065,11 +1069,12 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
int err;
......@@ -1082,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
dev_err(dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
pcie->pex_rst);
if (err) {
dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
dev_err(dev, "powerup sequence failed: %d\n", err);
return err;
}
......@@ -1096,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
dev_err(dev, "failed to enable AFI clock: %d\n", err);
return err;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable CML clock: %d\n",
err);
dev_err(dev, "failed to enable CML clock: %d\n", err);
return err;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
dev_err(dev, "failed to enable PLLE clock: %d\n", err);
return err;
}
......@@ -1120,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
pcie->pex_clk = devm_clk_get(dev, "pex");
if (IS_ERR(pcie->pex_clk))
return PTR_ERR(pcie->pex_clk);
pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
pcie->afi_clk = devm_clk_get(dev, "afi");
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
pcie->pll_e = devm_clk_get(dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
if (soc->has_cml_clk) {
pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
pcie->cml_clk = devm_clk_get(dev, "cml");
if (IS_ERR(pcie->cml_clk))
return PTR_ERR(pcie->cml_clk);
}
......@@ -1145,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
struct device *dev = pcie->dev;
pcie->pex_rst = devm_reset_control_get(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
pcie->afi_rst = devm_reset_control_get(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
......@@ -1162,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
pcie->phy = devm_phy_optional_get(dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
dev_err(pcie->dev, "failed to get PHY: %d\n", err);
dev_err(dev, "failed to get PHY: %d\n", err);
return err;
}
err = phy_init(pcie->phy);
if (err < 0) {
dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
dev_err(dev, "failed to initialize PHY: %d\n", err);
return err;
}
......@@ -1256,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
int err;
err = tegra_pcie_clocks_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
dev_err(dev, "failed to get clocks: %d\n", err);
return err;
}
err = tegra_pcie_resets_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get resets: %d\n", err);
dev_err(dev, "failed to get resets: %d\n", err);
return err;
}
err = tegra_pcie_phys_get(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
dev_err(dev, "failed to get PHYs: %d\n", err);
return err;
}
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
dev_err(dev, "failed to power up: %d\n", err);
return err;
}
pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
pcie->pads = devm_ioremap_resource(dev, pads);
if (IS_ERR(pcie->pads)) {
err = PTR_ERR(pcie->pads);
goto poweroff;
}
afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
pcie->afi = devm_ioremap_resource(dev, afi);
if (IS_ERR(pcie->afi)) {
err = PTR_ERR(pcie->afi);
goto poweroff;
......@@ -1305,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
goto poweroff;
}
pcie->cs = devm_request_mem_region(pcie->dev, res->start,
pcie->cs = devm_request_mem_region(dev, res->start,
resource_size(res), res->name);
if (!pcie->cs) {
err = -EADDRNOTAVAIL;
......@@ -1315,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
/* request interrupt */
err = platform_get_irq_byname(pdev, "intr");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto poweroff;
}
......@@ -1323,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
if (err) {
dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
dev_err(dev, "failed to register IRQ: %d\n", err);
goto poweroff;
}
......@@ -1336,6 +1345,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
if (pcie->irq > 0)
......@@ -1345,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
err = phy_exit(pcie->phy);
if (err < 0)
dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
dev_err(dev, "failed to teardown PHY: %d\n", err);
return 0;
}
......@@ -1384,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
struct tegra_pcie *pcie = data;
struct device *dev = pcie->dev;
struct tegra_msi *msi = &pcie->msi;
unsigned int i, processed = 0;
......@@ -1403,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
dev_info(pcie->dev, "unhandled MSI\n");
dev_info(dev, "unhandled MSI\n");
} else {
/*
* that's weird who triggered this?
* just clear it
*/
dev_info(pcie->dev, "unexpected MSI\n");
dev_info(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
......@@ -1488,7 +1499,8 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
......@@ -1497,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.dev = dev;
msi->chip.setup_irq = tegra_msi_setup_irq;
msi->chip.teardown_irq = tegra_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto err;
}
......@@ -1519,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
......@@ -1594,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(pcie->dev, "4x1, 1x1 configuration\n");
dev_info(dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;
case 0x0000102:
dev_info(pcie->dev, "2x1, 1x1 configuration\n");
dev_info(dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
dev_info(pcie->dev, "4x1, 2x1 configuration\n");
dev_info(dev, "4x1, 2x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
return 0;
case 0x00020202:
dev_info(pcie->dev, "2x3 configuration\n");
dev_info(dev, "2x3 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
return 0;
case 0x00010104:
dev_info(pcie->dev, "4x1, 1x2 configuration\n");
dev_info(dev, "4x1, 1x2 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
switch (lanes) {
case 0x00000004:
dev_info(pcie->dev, "single-mode configuration\n");
dev_info(dev, "single-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
return 0;
case 0x00000202:
dev_info(pcie->dev, "dual-mode configuration\n");
dev_info(dev, "dual-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
return 0;
}
......@@ -1673,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np,
*/
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
pcie->num_supplies = 3;
......@@ -1681,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
pcie->num_supplies = 2;
if (pcie->num_supplies == 0) {
dev_err(pcie->dev, "device %s not supported in legacy mode\n",
dev_err(dev, "device %s not supported in legacy mode\n",
np->full_name);
return -ENODEV;
}
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
......@@ -1698,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
if (pcie->num_supplies > 2)
pcie->supplies[2].supply = "avdd";
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
pcie->supplies);
return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
/*
......@@ -1713,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
*/
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
unsigned int i = 0;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
......@@ -1746,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
(need_pexb ? 2 : 0);
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
......@@ -1769,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
pcie->num_supplies = 5;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
......@@ -1782,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->supplies[4].supply = "vddio-pex-clk";
}
if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
pcie->num_supplies))
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
return devm_regulator_bulk_get(dev, pcie->num_supplies,
pcie->supplies);
/*
......@@ -1792,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
* that the device tree complies with an older version of the device
* tree binding.
*/
dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
dev_info(dev, "using legacy DT binding for power supplies\n");
devm_kfree(pcie->dev, pcie->supplies);
devm_kfree(dev, pcie->supplies);
pcie->num_supplies = 0;
return tegra_pcie_get_legacy_regulators(pcie);
......@@ -1802,7 +1816,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node, *port;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
......@@ -1812,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
int err;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
......@@ -1867,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
err);
dev_err(dev, "failed to parse ranges property: %d\n", err);
pcie->busn.name = np->name;
pcie->busn.start = 0;
pcie->busn.end = 0xff;
......@@ -1883,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
dev_err(pcie->dev, "invalid port number: %d\n", index);
dev_err(dev, "invalid port number: %d\n", index);
return -EINVAL;
}
......@@ -1899,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
if (err < 0) {
dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
dev_err(dev, "failed to parse # of lanes: %d\n",
err);
return err;
}
if (value > 16) {
dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
dev_err(dev, "invalid # of lanes: %u\n", value);
return -EINVAL;
}
......@@ -1919,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
mask |= ((1 << value) - 1) << lane;
lane += value;
rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
......@@ -1936,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
rp->base = devm_ioremap_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
......@@ -1945,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
if (err < 0) {
dev_err(pcie->dev, "invalid lane configuration\n");
dev_err(dev, "invalid lane configuration\n");
return err;
}
......@@ -1964,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
struct device *dev = port->pcie->dev;
unsigned int retries = 3;
unsigned long value;
......@@ -1986,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
dev_err(port->pcie->dev, "link %u down, retrying\n",
port->index);
dev_err(dev, "link %u down, retrying\n", port->index);
goto retry;
}
......@@ -2011,11 +2023,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(pcie->dev, "probing port %u, using %u lanes\n",
dev_info(dev, "probing port %u, using %u lanes\n",
port->index, port->lanes);
tegra_pcie_port_enable(port);
......@@ -2023,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
if (tegra_pcie_port_check_link(port))
continue;
dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
dev_info(dev, "link %u down, ignoring\n", port->index);
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
......@@ -2041,8 +2054,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.map_irq = tegra_pcie_map_irq;
hw.ops = &tegra_pcie_ops;
pci_common_init_dev(pcie->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}
......@@ -2204,17 +2216,18 @@ static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_pcie *pcie;
int err;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->soc = of_device_get_match_data(&pdev->dev);
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
pcie->dev = &pdev->dev;
pcie->dev = dev;
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
......@@ -2222,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
......@@ -2236,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n",
err);
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto put_resources;
}
}
err = tegra_pcie_enable(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
dev_err(dev, "failed to enable PCIe ports: %d\n", err);
goto disable_msi;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
err);
dev_err(dev, "failed to setup debugfs: %d\n", err);
}
platform_set_drvdata(pdev, pcie);
return 0;
disable_msi:
......
......@@ -76,6 +76,16 @@ struct xgene_pcie_port {
u32 version;
};
static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
return readl(port->csr_base + reg);
}
static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
writel(val, port->csr_base + reg);
}
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
......@@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
writel(rtdid_val, port->csr_base + RTDID);
xgene_pcie_writel(port, RTDID, rtdid_val);
/* read the register back to ensure flush */
readl(port->csr_base + RTDID);
xgene_pcie_readl(port, RTDID);
}
/*
......@@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
val32 = readl(csr_base + addr);
val32 = xgene_pcie_readl(port, addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
writel(val, csr_base + addr);
xgene_pcie_writel(port, addr, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x08);
val32 = xgene_pcie_readl(port, addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x08);
xgene_pcie_writel(port, addr + 0x08, val);
return mask;
}
......@@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
val32 = readl(csr_base + BRIDGE_STATUS_0);
val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
int rc;
port->clk = clk_get(port->dev, NULL);
port->clk = clk_get(dev, NULL);
if (IS_ERR(port->clk)) {
dev_err(port->dev, "clock not available\n");
dev_err(dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
dev_err(port->dev, "clock enable failed\n");
dev_err(dev, "clock enable failed\n");
return rc;
}
......@@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
struct device *dev = port->dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
port->csr_base = devm_ioremap_resource(port->dev, res);
port->csr_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
port->cfg_base = devm_ioremap_resource(port->dev, res);
port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
......@@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
void __iomem *base = port->csr_base + offset;
struct device *dev = port->dev;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
......@@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
if (size >= min_size)
mask = ~(size - 1) | flag;
else
dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
writel(lower_32_bits(cpu_addr), base);
writel(upper_32_bits(cpu_addr), base + 0x04);
writel(lower_32_bits(mask), base + 0x08);
writel(upper_32_bits(mask), base + 0x0c);
writel(lower_32_bits(pci_addr), base + 0x10);
writel(upper_32_bits(pci_addr), base + 0x14);
xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
writel(lower_32_bits(addr), csr_base + CFGBARL);
writel(upper_32_bits(addr), csr_base + CFGBARH);
writel(EN_REG, csr_base + CFGCTL);
u64 addr = port->cfg_addr;
xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
......@@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct resource *res = window->res;
u64 restype = resource_type(res);
dev_dbg(port->dev, "%pR\n", res);
dev_dbg(dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
......@@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return -EINVAL;
}
}
xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
xgene_pcie_setup_cfg_reg(port);
return 0;
}
static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
u64 pim, u64 size)
{
writel(lower_32_bits(pim), addr);
writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
writel(lower_32_bits(size), addr + 0x10);
writel(upper_32_bits(size), addr + 0x14);
xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
xgene_pcie_writel(port, pim_reg + 0x04,
upper_32_bits(pim) | EN_COHERENCY);
xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}
/*
......@@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
void *pim_addr;
u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
......@@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(port->dev, "invalid pcie dma-range config\n");
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
......@@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
pim_addr = csr_base + PIM1_1L;
pim_reg = PIM1_1L;
break;
case 1:
bar_addr = csr_base + IBAR2;
writel(bar_low, bar_addr);
writel(lower_32_bits(mask), csr_base + IR2MSK);
pim_addr = csr_base + PIM2_1L;
xgene_pcie_writel(port, IBAR2, bar_low);
xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
pim_reg = PIM2_1L;
break;
case 2:
bar_addr = csr_base + IBAR3L;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
writel(lower_32_bits(mask), csr_base + IR3MSKL);
writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
pim_addr = csr_base + PIM3_1L;
xgene_pcie_writel(port, IBAR3L, bar_low);
xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
pim_reg = PIM3_1L;
break;
}
xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
......@@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
......@@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
writel(0x0, port->csr_base + i);
xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
int ret;
......@@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
writel(val, port->csr_base + BRIDGE_CFG_0);
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
......@@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
dev_info(port->dev, "(rc) link down\n");
dev_info(dev, "(rc) link down\n");
else
dev_info(port->dev, "(rc) x%d gen-%d link up\n",
lanes, speed + 1);
dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->node = of_node_get(pdev->dev.of_node);
port->dev = &pdev->dev;
port->node = of_node_get(dn);
port->dev = dev;
port->version = XGENE_PCIE_IP_VER_UNKN;
if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
......@@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
ret = devm_request_pci_bus_resources(&pdev->dev, &res);
ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
goto error;
......@@ -548,8 +562,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
......@@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, port);
return 0;
error:
......
......@@ -55,15 +55,19 @@
#define TLP_PAYLOAD_SIZE 0x01
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, bus) \
((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
: TLP_FMTTYPE_CFGRD1) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
#define RP_DEVFN 0
#define LINK_UP_TIMEOUT HZ
#define LINK_RETRAIN_TIMEOUT HZ
......@@ -74,7 +78,7 @@
struct altera_pcie {
struct platform_device *pdev;
void __iomem *cra_base;
void __iomem *cra_base; /* DT Cra */
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
......@@ -131,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
......@@ -218,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
{
u32 headers[TLP_HDR_SIZE];
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_READ_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
tlp_write_packet(pcie, headers, 0, false);
......@@ -238,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_WRITE_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
/* check alignment to Qword */
......@@ -342,7 +336,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
*value = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
......@@ -359,7 +353,7 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
......@@ -394,6 +388,7 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u16 reg16;
unsigned long start_jiffies;
......@@ -406,7 +401,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link retrain timeout\n");
dev_err(dev, "link retrain timeout\n");
break;
}
udelay(100);
......@@ -419,7 +414,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link up timeout\n");
dev_err(dev, "link up timeout\n");
break;
}
udelay(100);
......@@ -460,7 +455,6 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
......@@ -472,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_pcie *pcie;
struct device *dev;
unsigned long status;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
dev = &pcie->pdev->dev;
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
......@@ -489,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
if (virq)
generic_handle_irq(virq);
else
dev_err(&pcie->pdev->dev,
"unexpected IRQ, INT%d\n", bit);
dev_err(dev, "unexpected IRQ, INT%d\n", bit);
}
}
......@@ -549,30 +544,25 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
struct resource *cra;
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct resource *cra;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
if (!cra) {
dev_err(&pdev->dev, "no Cra memory resource defined\n");
return -ENODEV;
}
pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base)) {
dev_err(&pdev->dev, "failed to map cra memory\n");
dev_err(dev, "failed to map cra memory\n");
return PTR_ERR(pcie->cra_base);
}
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
return -EINVAL;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
return 0;
}
......@@ -583,12 +573,13 @@ static void altera_pcie_host_init(struct altera_pcie *pcie)
static int altera_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
int ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
......@@ -596,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_dt(pcie);
if (ret) {
dev_err(&pdev->dev, "Parsing DT failed\n");
dev_err(dev, "Parsing DT failed\n");
return ret;
}
......@@ -604,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed add resources\n");
dev_err(dev, "Failed add resources\n");
return ret;
}
ret = altera_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
dev_err(dev, "Failed creating IRQ Domain\n");
return ret;
}
......@@ -620,7 +611,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
if (!bus)
return -ENOMEM;
......@@ -633,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return ret;
}
......
......@@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(res);
struct resource res_mem;
int ret;
pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &bdev->dev;
bcma_set_drvdata(bdev, pcie);
pcie->dev = dev;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
return -ENOMEM;
}
pcie->base_addr = bdev->addr;
res_mem.start = bdev->addr_s[0];
......@@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
bcma_set_drvdata(bdev, pcie);
return ret;
}
......
......@@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct iproc_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(res);
int ret;
of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
of_id = of_match_device(iproc_pcie_of_match_table, dev);
if (!of_id)
return -EINVAL;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
pcie->dev = dev;
pcie->type = (enum iproc_pcie_type)of_id->data;
platform_set_drvdata(pdev, pcie);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
dev_err(pcie->dev, "unable to obtain controller resources\n");
dev_err(dev, "unable to obtain controller resources\n");
return ret;
}
pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
if (!pcie->base) {
dev_err(pcie->dev, "unable to map controller registers\n");
dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
}
pcie->base_addr = reg.start;
......@@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-axi-offset property\n");
return ret;
}
......@@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-window-size property\n");
return ret;
}
......@@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
}
/* PHY use is optional */
pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
return -EPROBE_DEFER;
......@@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"unable to get PCI host bridge resources\n");
return ret;
}
......@@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
platform_set_drvdata(pdev, pcie);
return ret;
}
......
......@@ -63,6 +63,8 @@
#define OARR_SIZE_CFG_SHIFT 1
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
#define PCI_EXP_CAP 0xac
#define MAX_NUM_OB_WINDOWS 2
#define IPROC_PCIE_REG_INVALID 0xffff
......@@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
struct device *dev = pcie->dev;
u8 hdr_type;
u32 link_ctrl, class, val;
u16 pos, link_status;
u16 pos = PCI_EXP_CAP, link_status;
bool link_is_active = false;
/*
......@@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
dev_err(dev, "PHY or data link is INACTIVE!\n");
return -ENODEV;
}
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
......@@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
pci_bus_read_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
pci_bus_write_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
link_ctrl);
msleep(100);
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
......@@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
}
dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
return link_is_active ? 0 : -ENODEV;
}
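If the link did not come up, the hunk above drops the target link speed to Gen1 through Link Control 2 before re-checking link status. A hedged, standalone illustration of that field manipulation follows; it reuses the mask and speed encodings defined in the hunk, while the starting register value is an assumption.

/*
 * Hedged, standalone illustration of the Gen2 -> Gen1 fallback in
 * iproc_pcie_check_link(): clear the Target Link Speed field of
 * Link Control 2 and force 2.5 GT/s.  The starting register value is
 * made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define PCI_TARGET_LINK_SPEED_MASK	0xf
#define PCI_TARGET_LINK_SPEED_GEN2	0x2
#define PCI_TARGET_LINK_SPEED_GEN1	0x1

int main(void)
{
	uint32_t link_ctrl = 0x00000402;	/* assumed: currently targets 5.0 GT/s */

	if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
	    PCI_TARGET_LINK_SPEED_GEN2) {
		link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
		link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
	}

	printf("link_ctrl = %#010x\n", link_ctrl);	/* low nibble now 0x1 */
	return 0;
}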
......@@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
u64 pci_addr, resource_size_t size)
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
unsigned i;
u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
u64 remainder;
if (size > max_size) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap exceeds max supported size 0x%llx\n",
&size, max_size);
return -EINVAL;
......@@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
div64_u64_rem(size, ob->window_size, &remainder);
if (remainder) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap needs to be multiple of window size %pap\n",
&size, &ob->window_size);
return -EINVAL;
}
if (axi_addr < ob->axi_offset) {
dev_err(pcie->dev,
"axi address %pap less than offset %pap\n",
dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset);
return -EINVAL;
}
......@@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
struct list_head *resources)
{
struct device *dev = pcie->dev;
struct resource_entry *window;
int ret;
......@@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return ret;
break;
default:
dev_err(pcie->dev, "invalid resource %pR\n", res);
dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
......@@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
int ret;
void *sysdata;
struct pci_bus *bus;
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
ret = devm_request_pci_bus_resources(pcie->dev, res);
dev = pcie->dev;
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
dev_err(dev, "unable to initialize PCIe PHY\n");
return ret;
}
ret = phy_power_on(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to power on PCIe PHY\n");
dev_err(dev, "unable to power on PCIe PHY\n");
goto err_exit_phy;
}
......@@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pcie->reg_offsets = iproc_pcie_reg_paxc;
break;
default:
dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
dev_err(dev, "incompatible iProc PCIe interface\n");
ret = -EINVAL;
goto err_power_off_phy;
}
......@@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
dev_err(pcie->dev, "map failed\n");
dev_err(dev, "map failed\n");
goto err_power_off_phy;
}
}
......@@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
dev_err(pcie->dev, "unable to create PCI root bus\n");
dev_err(dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
goto err_power_off_phy;
}
......@@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
ret = iproc_pcie_check_link(pcie, bus);
if (ret) {
dev_err(pcie->dev, "no PCIe EP device detected\n");
dev_err(dev, "no PCIe EP device detected\n");
goto err_rm_root_bus;
}
......@@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (IS_ENABLED(CONFIG_PCI_MSI))
if (iproc_pcie_msi_enable(pcie))
dev_info(pcie->dev, "not using iProc MSI\n");
dev_info(dev, "not using iProc MSI\n");
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
......
......@@ -31,8 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define DRV_NAME "rcar-pcie"
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
......@@ -397,6 +395,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
unsigned int timeout = 1000;
u32 macsr;
......@@ -404,7 +403,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
return;
if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
dev_err(pcie->dev, "Speed change already in progress\n");
dev_err(dev, "Speed change already in progress\n");
return;
}
......@@ -433,7 +432,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, macsr, MACSR);
if (macsr & SPCHGFAIL)
dev_err(pcie->dev, "Speed change failed\n");
dev_err(dev, "Speed change failed\n");
goto done;
}
......@@ -441,15 +440,16 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
msleep(1);
};
dev_err(pcie->dev, "Speed change timed out\n");
dev_err(dev, "Speed change timed out\n");
done:
dev_info(pcie->dev, "Current link speed is %s GT/s\n",
dev_info(dev, "Current link speed is %s GT/s\n",
(macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct pci_bus *bus, *child;
LIST_HEAD(res);
......@@ -461,14 +461,14 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
if (IS_ENABLED(CONFIG_PCI_MSI))
bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
else
bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res);
if (!bus) {
dev_err(pcie->dev, "Scanning rootbus failed");
dev_err(dev, "Scanning rootbus failed");
return -ENODEV;
}
......@@ -487,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
unsigned int timeout = 100;
while (timeout--) {
......@@ -496,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
udelay(100);
}
dev_err(pcie->dev, "Access to PCIe phy timed out\n");
dev_err(dev, "Access to PCIe phy timed out\n");
return -ETIMEDOUT;
}
......@@ -697,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie *pcie = data;
struct rcar_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
unsigned long reg;
reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
......@@ -717,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
dev_info(pcie->dev, "unhandled MSI\n");
dev_info(dev, "unhandled MSI\n");
} else {
/* Unknown MSI, just clear it */
dev_dbg(pcie->dev, "unexpected MSI\n");
dev_dbg(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
......@@ -843,22 +845,22 @@ static const struct irq_domain_ops msi_domain_ops = {
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
int err, i;
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.dev = dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
......@@ -866,19 +868,19 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
irq_create_mapping(msi->domain, i);
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
......@@ -899,32 +901,32 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return err;
}
static int rcar_pcie_get_resources(struct platform_device *pdev,
struct rcar_pcie *pcie)
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct resource res;
int err, i;
err = of_address_to_resource(pdev->dev.of_node, 0, &res);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
pcie->base = devm_ioremap_resource(&pdev->dev, &res);
pcie->base = devm_ioremap_resource(dev, &res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
pcie->clk = devm_clk_get(&pdev->dev, "pcie");
pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(pcie->clk)) {
dev_err(pcie->dev, "cannot get platform clock\n");
dev_err(dev, "cannot get platform clock\n");
return PTR_ERR(pcie->clk);
}
err = clk_prepare_enable(pcie->clk);
if (err)
return err;
pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
dev_err(pcie->dev, "cannot get pcie bus clock\n");
dev_err(dev, "cannot get pcie bus clock\n");
err = PTR_ERR(pcie->bus_clk);
goto fail_clk;
}
......@@ -932,17 +934,17 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
if (err)
goto fail_clk;
i = irq_of_parse_and_map(pdev->dev.of_node, 0);
i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
pcie->msi.irq1 = i;
i = irq_of_parse_and_map(pdev->dev.of_node, 1);
i = irq_of_parse_and_map(dev->of_node, 1);
if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
......@@ -1119,60 +1121,60 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
const struct of_device_id *of_id;
int err;
int (*hw_init_fn)(struct rcar_pcie *);
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
INIT_LIST_HEAD(&pcie->resources);
rcar_pcie_parse_request_of_pci_ranges(pcie);
err = rcar_pcie_get_resources(pdev, pcie);
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
return err;
of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
of_id = of_match_device(rcar_pcie_of_match, dev);
if (!of_id || !of_id->data)
return -EINVAL;
hw_init_fn = of_id->data;
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_pm_disable;
}
/* Failure to get a link might just be that no cards are inserted */
err = hw_init_fn(pcie);
if (err) {
dev_info(&pdev->dev, "PCIe link down\n");
dev_info(dev, "PCIe link down\n");
err = 0;
goto err_pm_put;
}
data = rcar_pci_read_reg(pcie, MACSR);
dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = rcar_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(&pdev->dev,
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
goto err_pm_put;
......@@ -1186,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
err_pm_put:
pm_runtime_put(pcie->dev);
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(pcie->dev);
pm_runtime_disable(dev);
return err;
}
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = DRV_NAME,
.name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
.suppress_bind_attrs = true,
},
......
......@@ -1091,8 +1091,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err)
goto err_vpcie;
platform_set_drvdata(pdev, rockchip);
rockchip_pcie_enable_interrupts(rockchip);
err = rockchip_pcie_init_irq_domain(rockchip);
......
......@@ -212,6 +212,7 @@ static bool nwl_phy_link_up(struct nwl_pcie *pcie)
static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
int retries;
/* check if the link is up or not */
......@@ -221,7 +222,7 @@ static int nwl_wait_for_link(struct nwl_pcie *pcie)
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
dev_err(pcie->dev, "PHY link never came up\n");
dev_err(dev, "PHY link never came up\n");
return -ETIMEDOUT;
}
......@@ -277,6 +278,7 @@ static struct pci_ops nwl_pcie_ops = {
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
struct nwl_pcie *pcie = data;
struct device *dev = pcie->dev;
u32 misc_stat;
/* Checking for misc interrupts */
......@@ -286,45 +288,43 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
dev_err(pcie->dev, "Received Message FIFO Overflow\n");
dev_err(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
dev_err(pcie->dev, "Slave error\n");
dev_err(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
dev_err(pcie->dev, "Master error\n");
dev_err(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
dev_err(pcie->dev,
"In Misc Ingress address translation error\n");
dev_err(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
dev_err(pcie->dev,
"In Misc Egress address translation error\n");
dev_err(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
dev_err(pcie->dev, "Fatal Error in AER Capability\n");
dev_err(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
dev_err(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
dev_err(pcie->dev, "Correctable Error in AER Capability\n");
dev_err(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
dev_err(pcie->dev, "Unsupported request Detected\n");
dev_err(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
dev_err(pcie->dev, "Non-Fatal Error Detected\n");
dev_err(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
dev_err(pcie->dev, "Fatal Error Detected\n");
dev_err(dev, "Fatal Error Detected\n");
if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
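The misc handler above follows a read, decode, acknowledge pattern: latch the status word once, report each set bit, then write the same value back to MSGF_MISC_STATUS to clear what was handled. A hedged, standalone sketch of that pattern follows; the bit positions and the fake register are assumptions for the example.

/*
 * Hedged, standalone sketch of the read/decode/acknowledge pattern used
 * by nwl_pcie_misc_handler().  The write-back behaves as write-1-to-clear
 * here purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_SR_RXMSG_OVER	(1u << 1)
#define EX_SR_SLAVE_ERR		(1u << 4)
#define EX_SR_FATAL_DEV		(1u << 22)

static uint32_t fake_status = EX_SR_SLAVE_ERR | EX_SR_FATAL_DEV;

static uint32_t ex_readl(void)		{ return fake_status; }
static void ex_writel(uint32_t val)	{ fake_status &= ~val; }	/* W1C */

int main(void)
{
	uint32_t misc_stat = ex_readl();

	if (misc_stat & EX_SR_RXMSG_OVER)
		printf("Received Message FIFO Overflow\n");
	if (misc_stat & EX_SR_SLAVE_ERR)
		printf("Slave error\n");
	if (misc_stat & EX_SR_FATAL_DEV)
		printf("Fatal Error Detected\n");

	ex_writel(misc_stat);	/* acknowledge only what was observed */
	printf("status after clear = %#x\n", ex_readl());	/* 0 */
	return 0;
}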
......@@ -494,20 +494,21 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
struct device *dev = pcie->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
&dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(pcie->dev, "failed to create dev IRQ domain\n");
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
&nwl_msi_domain_info,
msi->dev_domain);
if (!msi->msi_domain) {
dev_err(pcie->dev, "failed to create msi IRQ domain\n");
dev_err(dev, "failed to create msi IRQ domain\n");
irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
......@@ -517,12 +518,13 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device_node *node = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
struct device_node *legacy_intc_node;
legacy_intc_node = of_get_next_child(node, NULL);
if (!legacy_intc_node) {
dev_err(pcie->dev, "No legacy intc node found\n");
dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
......@@ -532,7 +534,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
pcie);
if (!pcie->legacy_irq_domain) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
......@@ -542,7 +544,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct nwl_msi *msi = &pcie->msi;
unsigned long base;
int ret;
......@@ -557,7 +560,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_1 IRQ number */
msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
if (msi->irq_msi1 < 0) {
dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
ret = -EINVAL;
goto err;
}
......@@ -568,7 +571,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_0 IRQ number */
msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
if (msi->irq_msi0 < 0) {
dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
ret = -EINVAL;
goto err;
}
......@@ -579,7 +582,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Check for msii_present bit */
ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
if (!ret) {
dev_err(pcie->dev, "MSI not present\n");
dev_err(dev, "MSI not present\n");
ret = -EIO;
goto err;
}
......@@ -628,13 +631,14 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
u32 breg_val, ecam_val, first_busno = 0;
int err;
breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
if (!breg_val) {
dev_err(pcie->dev, "BREG is not present\n");
dev_err(dev, "BREG is not present\n");
return breg_val;
}
......@@ -665,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
if (!ecam_val) {
dev_err(pcie->dev, "ECAM is not present\n");
dev_err(dev, "ECAM is not present\n");
return ecam_val;
}
......@@ -692,23 +696,23 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
if (nwl_pcie_link_up(pcie))
dev_info(pcie->dev, "Link is UP\n");
dev_info(dev, "Link is UP\n");
else
dev_info(pcie->dev, "Link is DOWN\n");
dev_info(dev, "Link is DOWN\n");
/* Get misc IRQ number */
pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
if (pcie->irq_misc < 0) {
dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
dev_err(dev, "failed to get misc IRQ %d\n",
pcie->irq_misc);
return -EINVAL;
}
err = devm_request_irq(pcie->dev, pcie->irq_misc,
err = devm_request_irq(dev, pcie->irq_misc,
nwl_pcie_misc_handler, IRQF_SHARED,
"nwl_pcie:misc", pcie);
if (err) {
dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
dev_err(dev, "fail to register misc IRQ#%d\n",
pcie->irq_misc);
return err;
}
......@@ -744,31 +748,32 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
struct device_node *node = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
struct resource *res;
const char *type;
/* Check for device type */
type = of_get_property(node, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
pcie->breg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->breg_base))
return PTR_ERR(pcie->breg_base);
pcie->phys_breg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
pcie->pcireg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->pcireg_base))
return PTR_ERR(pcie->pcireg_base);
pcie->phys_pcie_reg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
pcie->ecam_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->ecam_base))
return PTR_ERR(pcie->ecam_base);
pcie->phys_ecam_base = res->start;
......@@ -776,8 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
/* Get intx IRQ number */
pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
if (pcie->irq_intx < 0) {
dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
pcie->irq_intx);
dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
return -EINVAL;
}
......@@ -794,7 +798,8 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
......@@ -802,42 +807,42 @@ static int nwl_pcie_probe(struct platform_device *pdev)
resource_size_t iobase = 0;
LIST_HEAD(res);
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
pcie->dev = dev;
pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
err = nwl_pcie_parse_dt(pcie, pdev);
if (err) {
dev_err(pcie->dev, "Parsing DT failed\n");
dev_err(dev, "Parsing DT failed\n");
return err;
}
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(pcie->dev, "HW Initialization failed\n");
dev_err(dev, "HW Initialization failed\n");
return err;
}
err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
if (err) {
dev_err(pcie->dev, "Getting bridge resources failed\n");
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
err = devm_request_pci_bus_resources(pcie->dev, &res);
err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto error;
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(pcie->dev, "Failed creating IRQ Domain\n");
dev_err(dev, "Failed creating IRQ Domain\n");
goto error;
}
bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
bus = pci_create_root_bus(dev, pcie->root_busno,
&nwl_pcie_ops, pcie, &res);
if (!bus) {
err = -ENOMEM;
......@@ -847,8 +852,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = nwl_pcie_enable_msi(pcie, bus);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n", err);
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto error;
}
}
......@@ -857,7 +861,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return 0;
error:
......
......@@ -140,10 +140,11 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
dev_dbg(port->dev, "Requester ID %lu\n",
dev_dbg(dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
......@@ -228,11 +229,10 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
/**
* xilinx_pcie_assign_msi - Allocate MSI number
* @port: PCIe port structure
*
* Return: A valid IRQ on success and error value on failure.
*/
static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
static int xilinx_pcie_assign_msi(void)
{
int pos;
......@@ -275,7 +275,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
struct msi_msg msg;
phys_addr_t msg_addr;
hwirq = xilinx_pcie_assign_msi(port);
hwirq = xilinx_pcie_assign_msi();
if (hwirq < 0)
return hwirq;
......@@ -383,6 +383,7 @@ static const struct irq_domain_ops intx_domain_ops = {
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
struct device *dev = port->dev;
u32 val, mask, status, msi_data;
/* Read interrupt decode and mask registers */
......@@ -394,32 +395,32 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
return IRQ_NONE;
if (status & XILINX_PCIE_INTR_LINK_DOWN)
dev_warn(port->dev, "Link Down\n");
dev_warn(dev, "Link Down\n");
if (status & XILINX_PCIE_INTR_ECRC_ERR)
dev_warn(port->dev, "ECRC failed\n");
dev_warn(dev, "ECRC failed\n");
if (status & XILINX_PCIE_INTR_STR_ERR)
dev_warn(port->dev, "Streaming error\n");
dev_warn(dev, "Streaming error\n");
if (status & XILINX_PCIE_INTR_HOT_RESET)
dev_info(port->dev, "Hot reset\n");
dev_info(dev, "Hot reset\n");
if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
dev_warn(port->dev, "ECAM access timeout\n");
dev_warn(dev, "ECAM access timeout\n");
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
dev_warn(port->dev, "Correctable error message\n");
dev_warn(dev, "Correctable error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
dev_warn(port->dev, "Non fatal error message\n");
dev_warn(dev, "Non fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_FATAL) {
dev_warn(port->dev, "Fatal error message\n");
dev_warn(dev, "Fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
......@@ -429,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
dev_warn(dev, "RP Intr FIFO1 read error\n");
goto error;
}
......@@ -451,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
dev_warn(dev, "RP Intr FIFO1 read error\n");
goto error;
}
......@@ -471,31 +472,31 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
dev_warn(port->dev, "Slave unsupported request\n");
dev_warn(dev, "Slave unsupported request\n");
if (status & XILINX_PCIE_INTR_SLV_UNEXP)
dev_warn(port->dev, "Slave unexpected completion\n");
dev_warn(dev, "Slave unexpected completion\n");
if (status & XILINX_PCIE_INTR_SLV_COMPL)
dev_warn(port->dev, "Slave completion timeout\n");
dev_warn(dev, "Slave completion timeout\n");
if (status & XILINX_PCIE_INTR_SLV_ERRP)
dev_warn(port->dev, "Slave Error Poison\n");
dev_warn(dev, "Slave Error Poison\n");
if (status & XILINX_PCIE_INTR_SLV_CMPABT)
dev_warn(port->dev, "Slave Completer Abort\n");
dev_warn(dev, "Slave Completer Abort\n");
if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
dev_warn(port->dev, "Slave Illegal Burst\n");
dev_warn(dev, "Slave Illegal Burst\n");
if (status & XILINX_PCIE_INTR_MST_DECERR)
dev_warn(port->dev, "Master decode error\n");
dev_warn(dev, "Master decode error\n");
if (status & XILINX_PCIE_INTR_MST_SLVERR)
dev_warn(port->dev, "Master slave error\n");
dev_warn(dev, "Master slave error\n");
if (status & XILINX_PCIE_INTR_MST_ERRP)
dev_warn(port->dev, "Master error poison\n");
dev_warn(dev, "Master error poison\n");
error:
/* Clear the Interrupt Decode register */
......@@ -554,10 +555,12 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
if (xilinx_pcie_link_is_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(port->dev, "PCIe Link is DOWN\n");
dev_info(dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
......@@ -627,8 +630,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
*/
static int xilinx_pcie_probe(struct platform_device *pdev)
{
struct xilinx_pcie_port *port;
struct device *dev = &pdev->dev;
struct xilinx_pcie_port *port;
struct pci_bus *bus;
int err;
resource_size_t iobase = 0;
......@@ -668,15 +671,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
if (err)
goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xilinx_pcie_ops, port, &res);
bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
if (!bus) {
err = -ENOMEM;
goto error;
}
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
xilinx_pcie_msi_chip.dev = dev;
bus->msi = &xilinx_pcie_msi_chip;
#endif
pci_scan_child_bus(bus);
......@@ -685,8 +687,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, port);
return 0;
error:
......