Commit c8a0739b authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'ntb-4.15' of git://github.com/jonmason/ntb

Pull ntb updates from Jon Mason:
 "Support for the switchtec ntb and related changes. Also, a couple of
  bug fixes"

[ The timing isn't great. I had asked people to send me pull requests
  before my family vacation, and this code has not even been in
  linux-next as far as I can tell. But Logan Gunthorpe pleaded for its
  inclusion because the Switchtec driver has apparently been around for
  a while, just never in linux-next - Linus ]

* tag 'ntb-4.15' of git://github.com/jonmason/ntb:
  ntb: intel: remove b2b memory window workaround for Skylake NTB
  NTB: make idt_89hpes_cfg const
  NTB: switchtec_ntb: Update switchtec documentation with notes for NTB
  NTB: switchtec_ntb: Add memory window support
  NTB: switchtec_ntb: Implement scratchpad registers
  NTB: switchtec_ntb: Implement doorbell registers
  NTB: switchtec_ntb: Add link management
  NTB: switchtec_ntb: Add skeleton NTB driver
  NTB: switchtec_ntb: Initialize hardware for doorbells and messages
  NTB: switchtec_ntb: Initialize hardware for memory windows
  NTB: switchtec_ntb: Introduce initial NTB driver
  NTB: Add check and comment for link up to mw_count() and mw_get_align()
  NTB: Ensure ntb_mw_get_align() is only called when the link is up
  NTB: switchtec: Add link event notifier callback
  NTB: switchtec: Add NTB hardware register definitions
  NTB: switchtec: Export class symbol for use in upper layer driver
  NTB: switchtec: Move structure definitions into a common header
  ntb: update maintainer list for Intel NTB driver
parents 020aae3e 4201a991
...@@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device: ...@@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device:
between PCI Function Framework number (used by the event system) between PCI Function Framework number (used by the event system)
and Switchtec Logic Port ID and Partition number (which is more and Switchtec Logic Port ID and Partition number (which is more
user friendly). user friendly).
Non-Transparent Bridge (NTB) Driver
===================================
An NTB driver is provided for the switchtec hardware in switchtec_ntb.
Currently, it only supports switches configured with exactly 2
partitions. It also requires the following configuration settings:
* Both partitions must be able to access each other's GAS spaces.
Thus, the bits in the GAS Access Vector under Management Settings
must be set to support this.
...@@ -9726,12 +9726,11 @@ S: Supported ...@@ -9726,12 +9726,11 @@ S: Supported
F: drivers/ntb/hw/idt/ F: drivers/ntb/hw/idt/
NTB INTEL DRIVER NTB INTEL DRIVER
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com> M: Dave Jiang <dave.jiang@intel.com>
L: linux-ntb@googlegroups.com L: linux-ntb@googlegroups.com
S: Supported S: Supported
W: https://github.com/jonmason/ntb/wiki W: https://github.com/davejiang/linux/wiki
T: git git://github.com/jonmason/ntb.git T: git https://github.com/davejiang/linux.git
F: drivers/ntb/hw/intel/ F: drivers/ntb/hw/intel/
NTFS FILESYSTEM NTFS FILESYSTEM
...@@ -10443,6 +10442,8 @@ F: Documentation/switchtec.txt ...@@ -10443,6 +10442,8 @@ F: Documentation/switchtec.txt
F: Documentation/ABI/testing/sysfs-class-switchtec F: Documentation/ABI/testing/sysfs-class-switchtec
F: drivers/pci/switch/switchtec* F: drivers/pci/switch/switchtec*
F: include/uapi/linux/switchtec_ioctl.h F: include/uapi/linux/switchtec_ioctl.h
F: include/linux/switchtec.h
F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
......
source "drivers/ntb/hw/amd/Kconfig" source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig" source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig" source "drivers/ntb/hw/intel/Kconfig"
source "drivers/ntb/hw/mscc/Kconfig"
obj-$(CONFIG_NTB_AMD) += amd/ obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/ obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/ obj-$(CONFIG_NTB_INTEL) += intel/
obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
...@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev) ...@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
/* /*
* IDT PCIe-switch models ports configuration structures * IDT PCIe-switch models ports configuration structures
*/ */
static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
.name = "89HPES24NT6AG2", .name = "89HPES24NT6AG2",
.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12} .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
}; };
static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
.name = "89HPES32NT8AG2", .name = "89HPES32NT8AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
}; };
static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
.name = "89HPES32NT8BG2", .name = "89HPES32NT8BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
}; };
static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
.name = "89HPES12NT12G2", .name = "89HPES12NT12G2",
.port_cnt = 3, .ports = {0, 8, 16} .port_cnt = 3, .ports = {0, 8, 16}
}; };
static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
.name = "89HPES16NT16G2", .name = "89HPES16NT16G2",
.port_cnt = 4, .ports = {0, 8, 12, 16} .port_cnt = 4, .ports = {0, 8, 12, 16}
}; };
static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
.name = "89HPES24NT24G2", .name = "89HPES24NT24G2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
}; };
static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
.name = "89HPES32NT24AG2", .name = "89HPES32NT24AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
}; };
static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
.name = "89HPES32NT24BG2", .name = "89HPES32NT24BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
}; };
......
...@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
{ {
struct pci_dev *pdev; struct pci_dev *pdev;
void __iomem *mmio; void __iomem *mmio;
resource_size_t bar_size;
phys_addr_t bar_addr; phys_addr_t bar_addr;
int b2b_bar;
u8 bar_sz;
pdev = ndev->ntb.pdev; pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio; mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
} else {
b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
if (b2b_bar < 0)
return -EIO;
dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
dev_dbg(&pdev->dev, "b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1;
} else if (bar_size >= XEON_B2B_MIN_SIZE) {
dev_dbg(&pdev->dev, "b2b using whole bar\n");
ndev->b2b_off = 0;
--ndev->mw_count;
} else {
dev_dbg(&pdev->dev, "b2b bar size is too small\n");
return -EIO;
}
}
/*
* Reset the secondary bar sizes to match the primary bar sizes,
* except disable or halve the size of the b2b secondary bar.
*/
pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
if (b2b_bar == 1) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
if (b2b_bar == 2) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
/* SBAR01 hit by first part of the b2b bar */
if (b2b_bar == 0)
bar_addr = addr->bar0_addr;
else if (b2b_bar == 1)
bar_addr = addr->bar2_addr64;
else if (b2b_bar == 2)
bar_addr = addr->bar4_addr64;
else
return -EIO;
/* setup incoming bar limits == base addrs (zero length windows) */ /* setup incoming bar limits == base addrs (zero length windows) */
bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
......
config NTB_SWITCHTEC
tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
select PCI_SW_SWITCHTEC
help
Enables NTB support for Switchtec PCI switches. This also
selects the Switchtec management driver as they share the same
hardware interface.
If unsure, say N.
obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
/*
* Microsemi Switchtec(tm) PCIe Management Driver
* Copyright (c) 2017, Microsemi Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/switchtec.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");
static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
"Max memory window size reported to the upper layer");
static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
"Enable the use of the LUT based memory windows");
/*
 * Fallback 64-bit MMIO accessors for architectures that do not provide
 * ioread64/iowrite64.  Prefer readq/writeq when available; otherwise
 * split the access into two 32-bit operations, low dword first.
 * NOTE(review): the split form is not a single 64-bit bus transaction —
 * acceptable only if no register here requires atomic 64-bit access;
 * confirm against the Switchtec datasheet.
 */
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
/* Read 64 bits as two 32-bit reads: low dword, then high dword. */
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;
	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
/* Write 64 bits as two 32-bit writes: low dword, then high dword. */
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
/* Magic value placed in the shared page so the peer can validate it. */
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128
/*
 * Layout of the page each side publishes to its peer (through LUT
 * entry 0).  Holds the link state, partition ID, advertised memory
 * window sizes and the scratchpad register bank; the peer reads it
 * over MMIO.  Note check_link() reads magic+link_sta as one 64-bit
 * load, so these two fields must stay adjacent and in this order.
 */
struct shared_mw {
	u32 magic;		/* SWITCHTEC_NTB_MAGIC when the page is valid */
	u32 link_sta;		/* non-zero while this side wants the link up */
	u32 partition_id;	/* partition that owns this page */
	u64 mw_sizes[MAX_MWS];	/* size of each advertised window, 0 = unused */
	u32 spad[128];		/* scratchpad registers exposed to the peer */
};
/* Number of direct-window BAR setup registers per partition. */
#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
/* Size of each LUT window and of the shared-memory page. */
#define LUT_SIZE SZ_64K
/* Per-device driver context embedding the generic ntb_dev. */
struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;
	int self_partition;	/* our partition number */
	int peer_partition;	/* peer partition number */
	int doorbell_irq;
	int message_irq;
	/* register block pointers into the switch's management BAR */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	/* shared pages: ours (DMA coherent) and the peer's (iomapped) */
	struct shared_mw *self_shared;
	struct shared_mw __iomem *peer_shared;
	dma_addr_t self_shared_dma;
	/* doorbell state; the shifts select our half of the 64 hw bits */
	u64 db_mask;
	u64 db_valid_mask;
	int db_shift;
	int db_peer_shift;
	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;
	/* memory-window counts and BAR mappings, local and peer side */
	int nr_direct_mw;
	int nr_lut_mw;
	int direct_mw_to_bar[MAX_DIRECT_MW];
	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
	/* cached link state, refreshed by switchtec_ntb_check_link() */
	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
};
/* Recover the driver context from the embedded ntb_dev. */
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}
/*
 * Issue a partition operation (lock/configure/reset) and poll until the
 * hardware leaves the corresponding transient state.
 *
 * @sndev:       driver context (used for error reporting)
 * @ctl:         partition control register block to operate on
 * @op:          one of NTB_CTRL_PART_OP_{LOCK,CFG,RESET}
 * @wait_status: terminal status value that indicates success
 *
 * Returns 0 on success, -EINVAL for an unknown op, -EINTR if the sleep
 * was interrupted (the partition is reset in that case), -ETIMEDOUT if
 * the transient state never cleared, or -EIO if the hardware ended in
 * an unexpected state.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};
	int i;
	u32 ps;
	int status;
	/* Map the requested op to the transient status it produces. */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}
	iowrite32(op, &ctl->partition_op);
	/* Poll up to ~50s (1000 * 50ms) for the transient state to end. */
	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* Interrupted: put the partition back in a sane state. */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}
		ps = ioread32(&ctl->partition_status) & 0xFFFF;
		if (ps != status)
			break;
	}
	if (ps == wait_status)
		return 0;
	if (ps == status) {
		/* Fix: error message previously said "peforming". */
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)",
			op_text[op], op,
			ioread32(&ctl->partition_status));
		return -ETIMEDOUT;
	}
	return -EIO;
}
/*
 * Write @val into outgoing message register @idx, raising a message
 * interrupt on the peer.  Returns -EINVAL for an out-of-range index.
 */
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
				  u32 val)
{
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
		return -EINVAL;
	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
	return 0;
}
static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int nr_direct_mw = sndev->peer_nr_direct_mw;
int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (!use_lut_mws)
nr_lut_mw = 0;
return nr_direct_mw + nr_lut_mw;
}
/*
 * Map a local memory-window index to a LUT table index.  LUT windows
 * follow the direct windows, and LUT entry 0 is reserved for the
 * shared-memory page, hence the +1.
 */
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + 1;
}
/* Same mapping as lut_index(), but using the peer's window counts. */
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + 1;
}
/*
 * Report alignment constraints and maximum size for peer window @widx.
 * Direct windows need 4K address/size alignment; LUT windows must be
 * aligned to (and sized exactly as) the advertised window size.
 * NOTE(review): @widx indexes peer mw_sizes[] directly here, while the
 * translation-setup paths add +1 for the reserved LUT entry — confirm
 * the two indexing schemes agree for LUT windows.
 */
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;
	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
	if (size == 0)
		return -EINVAL;
	if (addr_align)
		*addr_align = lut ? size : SZ_4K;
	if (size_align)
		*size_align = lut ? size : SZ_4K;
	if (size_max)
		*size_max = size;
	return 0;
}
/*
 * Disable direct window @idx in the peer's control registers: clear the
 * window-enable bit, zero the size, and reset the translation to just
 * our partition ID (no address).
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;
	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
/* Disable LUT window @idx by zeroing its entry in the peer's LUT. */
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
/*
 * Program direct window @idx in the peer's control registers to
 * translate to DMA address @addr with the given power-of-two @size.
 * The win_size register encodes log2(size) in the low byte; the
 * translation combines our partition ID with the target address.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;
	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
/*
 * Program LUT window @idx in the peer's LUT: enable bit, our partition
 * number in bits [?:1], and the target DMA address.
 */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
/*
 * Set (or clear, when @addr/@size are zero) the translation of peer
 * window @widx.  The sequence is: lock the peer partition config, apply
 * the direct or LUT programming, then commit with a CFG op.  If the
 * hardware rejects the config (-EIO), the window is cleared and a
 * second CFG op restores normal state.
 *
 * Returns 0 on success, -EINVAL for bad peer index / window index /
 * size below 4K (xlate_pos < 12), or the partition-op error code.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;
	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
		widx, pidx, &addr, &size);
	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;
	if (xlate_pos < 12)
		return -EINVAL;
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;
	if (addr == 0 || size == 0) {
		/* zero addr/size means tear the window down */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc == -EIO) {
		/* hardware refused the config: clear and re-commit */
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x",
			widx, ioread32(&ctl->bar_error));
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}
	return rc;
}
/*
 * Number of memory windows we expose to the peer: our direct windows
 * plus, when LUT windows are enabled, our LUT entries minus the one
 * reserved for the shared-memory page.
 */
static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int count = sndev->nr_direct_mw;
	if (use_lut_mws)
		count += sndev->nr_lut_mw - 1;
	return count;
}
/*
 * Return the physical address and usable size of direct window @idx.
 * Window 0 shares its BAR with the LUT entries, so its usable region
 * starts after all LUT slots.
 * NOTE(review): when offset is non-zero the size is capped *to* the
 * offset (LUT_SIZE * nr_lut_mw) — presumably to keep window 0
 * symmetric with the LUT region; confirm this is the intended cap.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;
	if (bar < 0)
		return -EINVAL;
	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */
		offset = LUT_SIZE * sndev->nr_lut_mw;
	}
	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		if (offset && *size > offset)
			*size = offset;
		if (*size > max_mw_size)
			*size = max_mw_size;
	}
	return 0;
}
/*
 * Return the physical address and size of LUT window @idx.  All LUT
 * windows live at fixed LUT_SIZE slots within the BAR shared with
 * direct window 0.
 */
static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
				      int idx, phys_addr_t *base,
				      resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[0];
	int offset;
	offset = LUT_SIZE * lut_index(sndev, idx);
	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
	if (size)
		*size = LUT_SIZE;
	return 0;
}
/*
 * Dispatch a window-address query to the direct or LUT helper depending
 * on where @idx falls in the window numbering.
 */
static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
					  phys_addr_t *base,
					  resource_size_t *size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (idx < sndev->nr_direct_mw)
		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
	if (idx < switchtec_ntb_peer_mw_count(ntb))
		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
	return -EINVAL;
}
/*
 * Read the negotiated PCIe speed/width for @partition's upstream port.
 * NOTE(review): pci_cap_region[13] is presumably the PCIe Link Status
 * dword of the port's capability block (speed in [19:16], width in
 * [25:20]) — confirm against the register map.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;
	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
	if (speed)
		*speed = (linksta >> 16) & 0xF;
	if (width)
		*width = (linksta >> 20) & 0x3F;
}
/*
 * Refresh the cached link speed/width.  The effective values are the
 * minimum of what each side negotiated; when the logical link is down
 * both are reported as NONE.
 */
static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
	enum ntb_speed self_speed, peer_speed;
	enum ntb_width self_width, peer_width;
	if (!sndev->link_is_up) {
		sndev->link_speed = NTB_SPEED_NONE;
		sndev->link_width = NTB_WIDTH_NONE;
		return;
	}
	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
				      &self_speed, &self_width);
	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
				      &peer_speed, &peer_width);
	sndev->link_speed = min(self_speed, peer_speed);
	sndev->link_width = min(self_width, peer_width);
}
/* Message register index and codes used for link-state signalling. */
enum {
	LINK_MESSAGE = 0,	/* outgoing message register used for link events */
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
};
/*
 * Re-evaluate the logical link: the link is up only when our side wants
 * it up AND the peer's shared page is valid (magic matches) with its
 * link_sta set.  The peer's magic and link_sta are read together as one
 * 64-bit load (link_sta ends up in the high 32 bits).  On any change,
 * notify the peer and raise an ntb link event.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
{
	int link_sta;
	int old = sndev->link_is_up;
	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		u64 peer = ioread64(&sndev->peer_shared->magic);
		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}
	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);
	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s",
			 link_sta ? "up" : "down");
	}
}
/* Callback from the switchtec core on a hardware link event. */
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;
	switchtec_ntb_check_link(sndev);
}
/*
 * Report the cached link state, optionally with the cached negotiated
 * speed and width.  Non-zero return means the link is up.
 */
static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
				    enum ntb_speed *speed,
				    enum ntb_width *width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (speed)
		*speed = sndev->link_speed;
	if (width)
		*width = sndev->link_width;
	return sndev->link_is_up;
}
/*
 * Bring the logical link up: publish link_sta = 1 in our shared page,
 * tell the peer, and re-evaluate locally.  @max_speed/@max_width are
 * accepted for the ntb API but not used by this hardware.
 */
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	dev_dbg(&sndev->stdev->dev, "enabling link");
	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
	switchtec_ntb_check_link(sndev);
	return 0;
}
/*
 * Take the logical link down: clear link_sta in our shared page, notify
 * the peer, and re-evaluate locally.
 * NOTE(review): this sends MSG_LINK_UP even though it is disabling the
 * link.  The peer's check path re-reads link state from shared memory,
 * so any message triggers a re-check — but confirm MSG_LINK_DOWN was
 * not intended here (it is defined and otherwise unused).
 */
static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	dev_dbg(&sndev->stdev->dev, "disabling link");
	sndev->self_shared->link_sta = 0;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
	switchtec_ntb_check_link(sndev);
	return 0;
}
/* Bitmask of the doorbell bits usable by this partition (28 bits). */
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	return sndev->db_valid_mask;
}
/* All doorbells are delivered on a single interrupt vector. */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
/*
 * The single vector covers every valid doorbell bit; any other vector
 * index maps to no doorbells.
 */
static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (db_vector < 0 || db_vector > 1)
		return 0;
	return sndev->db_valid_mask;
}
/*
 * Read the pending incoming doorbell bits, shifted down to our
 * partition's bit range and masked to the valid bits.
 */
static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
	u64 ret;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
	return ret & sndev->db_valid_mask;
}
/* Acknowledge (clear) the given doorbell bits by writing them back. */
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
	return 0;
}
/*
 * Mask (disable interrupts for) the given doorbell bits.  The software
 * mask is kept in db_mask under db_mask_lock; the hardware idb_mask
 * register is written with its complement (it holds enabled bits).
 */
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;
	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
	return 0;
}
/*
 * Unmask (re-enable interrupts for) the given doorbell bits; inverse
 * operation of switchtec_ntb_db_set_mask() under the same lock.
 */
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;
	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
	return 0;
}
/* Return the current software doorbell mask in caller-relative bits. */
static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}
/*
 * Report the physical address of the outgoing doorbell register so a
 * client (e.g. a DMA engine) can ring the peer directly.  db_shift / 8
 * converts our bit offset into a byte offset within the odb register.
 */
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
				      phys_addr_t *db_addr,
				      resource_size_t *db_size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;
	offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
		(unsigned long)sndev->stdev->mmio;
	offset += sndev->db_shift / 8;
	if (db_addr)
		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
	if (db_size)
		*db_size = sizeof(u32);
	return 0;
}
/* Ring the peer's doorbells: shift into the peer's half of the odb. */
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb);
	return 0;
}
/* Number of scratchpad registers (the spad[] bank in the shared page). */
static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	return ARRAY_SIZE(sndev->self_shared->spad);
}
/*
 * Read local scratchpad @idx.  Returns 0 when the shared page has not
 * been allocated or the index is out of range.
 */
static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (!sndev->self_shared)
		return 0;
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return 0;
	return sndev->self_shared->spad[idx];
}
/*
 * Write local scratchpad @idx.  Returns -EINVAL for a bad index and
 * -EIO when the shared page has not been allocated (check order
 * matters: the error codes differ).
 */
static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return -EINVAL;
	if (!sndev->self_shared)
		return -EIO;
	sndev->self_shared->spad[idx] = val;
	return 0;
}
/*
 * Read peer scratchpad @sidx over MMIO.
 * NOTE(review): the return type is u32 but -EINVAL is returned for a
 * bad peer index — callers see it as a large unsigned value; the ntb
 * API presumably tolerates this, but worth confirming.
 */
static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
					int sidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;
	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return 0;
	if (!sndev->peer_shared)
		return 0;
	return ioread32(&sndev->peer_shared->spad[sidx]);
}
/*
 * Write peer scratchpad @sidx over MMIO.  Returns -EINVAL for a bad
 * peer or scratchpad index and -EIO when the peer page is not mapped.
 */
static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
					 int sidx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;
	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;
	if (!sndev->peer_shared)
		return -EIO;
	iowrite32(val, &sndev->peer_shared->spad[sidx]);
	return 0;
}
/*
 * Compute the physical address of peer scratchpad @sidx so a client
 * (e.g. a DMA engine) can write it directly.
 *
 * Returns 0 on success, -EINVAL for a bad peer or scratchpad index.
 */
static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
					int sidx, phys_addr_t *spad_addr)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;
	/*
	 * Bounds-check sidx like peer_spad_read/write do, rather than
	 * handing back an address outside the spad array.
	 */
	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;
	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
		(unsigned long)sndev->stdev->mmio;
	if (spad_addr)
		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
	return 0;
}
/* ntb_dev_ops vtable wiring this driver into the generic NTB core. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
/*
 * Fill in the ntb_dev fields and locate the self/peer register blocks.
 * The peer partition is taken as the lowest-numbered partition other
 * than our own that appears in the endpoint map.
 */
static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
	u64 part_map;
	sndev->ntb.pdev = sndev->stdev->pdev;
	sndev->ntb.topo = NTB_TOPO_SWITCH;
	sndev->ntb.ops = &switchtec_ntb_ops;
	sndev->self_partition = sndev->stdev->partition;
	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
	part_map = ioread64(&sndev->mmio_ntb->ep_map);
	/*
	 * Use a 64-bit constant: ep_map is u64 and partition numbers can
	 * exceed 31, for which "1 << part" would be wrong (and UB).
	 */
	part_map &= ~(1ULL << sndev->self_partition);
	/*
	 * NOTE(review): ffs() only examines the low 32 bits, so a peer
	 * in partition >= 32 would not be found — confirm the supported
	 * partition range for this hardware.
	 */
	sndev->peer_partition = ffs(part_map) - 1;
	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
		sndev->self_partition, sndev->stdev->partition_count,
		part_map);
	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_CTRL_OFFSET;
	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
}
/*
 * Record which BAR setup registers are marked valid.  Fills @map with
 * the valid BAR indices in ascending order and returns the count.
 */
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
	int bar;
	int cnt = 0;
	for (bar = 0; bar < ARRAY_SIZE(ctrl->bar_entry); bar++) {
		u32 ctl_val = ioread32(&ctrl->bar_entry[bar].ctl);
		if (ctl_val & NTB_CTRL_BAR_VALID)
			map[cnt++] = bar;
	}
	return cnt;
}
/*
 * Discover the direct and LUT memory windows on both sides.  LUT counts
 * are rounded down to a power of two (the BAR control register encodes
 * the count as a log2 field).
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);
	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
		sndev->nr_direct_mw, sndev->nr_lut_mw);
	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);
	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
}
/*
 * There are 64 doorbells in the switch hardware but this is
 * shared among all partitions. So we must split them in half
 * (32 for each partition). However, the message interrupts are
 * also shared with the top 4 doorbells so we just limit this to
 * 28 doorbells per partition
 */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	sndev->db_valid_mask = 0x0FFFFFFF;
	/* lower-numbered partition takes the low half of the 64 bits */
	if (sndev->self_partition < sndev->peer_partition) {
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
	}
	/* start with everything masked except the top message bits */
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	/* allow our rings to reach the peer's half of the register */
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb_mask);
}
/*
 * Route incoming message register i to the peer partition's outgoing
 * message i (each msg_map byte encodes msg index | partition << 2),
 * then clear/mask all incoming message registers.
 */
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;
	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;
		msg_map |= m << i * 8;
	}
	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}
/*
 * Program the requester-ID table so that both the root complex (0:00.0)
 * and the host bridge (as reported by the hardware) may issue requests
 * through the bridge.  Follows the usual lock -> program -> CFG-commit
 * sequence; on -EIO the hardware's req_id_error register is reported.
 */
static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int rc = 0;
	u16 req_id;
	u32 error;
	req_id = ioread16(&sndev->mmio_ntb->requester_id);
	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.");
		return -EFAULT;
	}
	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;
	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &sndev->mmio_self_ctrl->partition_ctrl);
	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[0]);
	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[1]);
	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc == -EIO) {
		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x",
			error);
	}
	return rc;
}
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int widx;

	/* Start from a clean slate, then publish our identity */
	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	/* Advertise the size of each direct memory window (one per BAR) */
	for (widx = 0; widx < sndev->nr_direct_mw; widx++) {
		int bar = sndev->direct_mw_to_bar[widx];
		resource_size_t len;

		len = pci_resource_len(sndev->stdev->pdev, bar);

		/*
		 * The first BAR also hosts the LUT windows, so cap the
		 * advertised size of direct window 0 at the LUT region.
		 */
		if (widx == 0)
			len = min_t(resource_size_t, len,
				    LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[widx] = len;
	}

	/* LUT windows follow the direct ones and have a fixed size */
	for (widx = 0; widx < sndev->nr_lut_mw; widx++)
		sndev->self_shared->mw_sizes[sndev->nr_direct_mw + widx] =
			LUT_SIZE;
}
/*
 * Allocate the local shared-info buffer, expose it to the peer through
 * LUT entry 0 of the peer's partition controller, and map the peer's
 * shared buffer at the start of the same BAR.
 *
 * Returns 0 on success or a negative error code; on any failure the
 * DMA buffer is released again.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw");
		return -ENOMEM;
	}

	/* Fill in magic, partition id and window sizes before exposing it */
	switchtec_ntb_init_shared(sndev);

	/* Lock the peer partition controller while reprogramming its BAR */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		goto unalloc_and_exit;

	/*
	 * Enable LUT windows on this BAR: log2 window size goes in
	 * bits 8.., (number of LUT entries - 1) in bits 14..
	 */
	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);

	/* Point the peer's LUT entry 0 at our shared DMA buffer */
	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
		   sndev->self_shared_dma),
		  &ctl->lut_entry[0]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up shared MW: %08x / %08x",
			bar_error, lut_error);
		goto unalloc_and_exit;
	}

	/* The peer's shared buffer appears at the start of the same BAR */
	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
	return 0;

unalloc_and_exit:
	/*
	 * NOTE(review): sndev->self_shared is left dangling here; this is
	 * safe only because the caller's error path does not invoke
	 * switchtec_ntb_deinit_shared_mw() after this function fails.
	 */
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);
	return rc;
}
static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
if (sndev->peer_shared)
pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
if (sndev->self_shared)
dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
sndev->self_shared,
sndev->self_shared_dma);
}
/* Doorbell interrupt handler: forward the event to the NTB core */
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *data)
{
	struct switchtec_ntb *sndev = data;

	dev_dbg(&sndev->stdev->dev, "doorbell\n");

	ntb_db_event(&sndev->ntb, 0);

	return IRQ_HANDLED;
}
static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
int i;
struct switchtec_ntb *sndev = dev;
for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
if (msg & NTB_DBMSG_IMSG_STATUS) {
dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
(u32)msg);
iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
if (i == LINK_MESSAGE)
switchtec_ntb_check_link(sndev);
}
}
return IRQ_HANDLED;
}
/*
 * Pick interrupt vectors for doorbells and messages that don't collide
 * with each other or with the switchtec event vector, program the
 * incoming-doorbell vector map accordingly, and request both IRQs.
 *
 * Returns 0 on success or the request_irq() error; on failure of the
 * second request, the doorbell IRQ is released again.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Choose the lowest vector numbers that avoid the event vector */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
		event_irq, doorbell_irq, message_irq);

	/*
	 * All idb vector-map slots except the last 4 route to the
	 * doorbell vector; the last 4 route to the message vector.
	 */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	/* Translate vector numbers into Linux IRQ numbers */
	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
/* Release the doorbell and message IRQs requested in
 * switchtec_ntb_init_db_msg_irq(). */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
/*
 * Class-interface add callback: create and register an NTB device for
 * a switchtec device that has the NTB class code.
 *
 * Returns -ENODEV for non-NTB devices (so the class interface skips
 * them), -ENOMEM on allocation failure, or the error from any of the
 * init steps; on failure, previously initialized resources are unwound
 * in reverse order via the goto chain.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	/* Only a two-partition setup is supported; warn but carry on */
	if (stdev->partition_count != 2)
		dev_warn(dev, "ntb driver only supports 2 partitions");

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;

	/* Infallible software/hardware setup first... */
	switchtec_ntb_init_sndev(sndev);
	switchtec_ntb_init_mw(sndev);
	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	/* ...then the steps that can fail, each with its own unwind label */
	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	/* Publish the device and hook link notifications only on success */
	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d", rc);
	return rc;
}
/*
 * Class-interface remove callback: tear down and free the NTB device
 * that switchtec_ntb_add() created for this switchtec device, if any.
 *
 * Marked static for consistency with switchtec_ntb_add(): it is only
 * referenced through switchtec_interface in this file, so it should
 * not occupy the global namespace.
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	if (!sndev)
		return;

	/* Detach from the switchtec core before tearing anything down */
	stdev->link_notifier = NULL;
	stdev->sndev = NULL;

	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered");
}
/* Hook our add/remove callbacks into the switchtec class so an NTB
 * device is created for each switchtec device that appears. */
static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
/* Module init: bind to the switchtec class (exported by the switchtec
 * core driver) and register the class interface. */
static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
/* Module exit: unregister the class interface, which invokes the
 * remove callback for any still-bound devices. */
static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
...@@ -191,8 +191,6 @@ struct ntb_transport_qp { ...@@ -191,8 +191,6 @@ struct ntb_transport_qp {
struct ntb_transport_mw { struct ntb_transport_mw {
phys_addr_t phys_addr; phys_addr_t phys_addr;
resource_size_t phys_size; resource_size_t phys_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
void __iomem *vbase; void __iomem *vbase;
size_t xlat_size; size_t xlat_size;
size_t buff_size; size_t buff_size;
...@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, ...@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev; struct pci_dev *pdev = nt->ndev->pdev;
size_t xlat_size, buff_size; size_t xlat_size, buff_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
int rc; int rc;
if (!size) if (!size)
return -EINVAL; return -EINVAL;
xlat_size = round_up(size, mw->xlat_align_size); rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
buff_size = round_up(size, mw->xlat_align); &xlat_align_size, NULL);
if (rc)
return rc;
xlat_size = round_up(size, xlat_align_size);
buff_size = round_up(size, xlat_align);
/* No need to re-setup */ /* No need to re-setup */
if (mw->xlat_size == xlat_size) if (mw->xlat_size == xlat_size)
...@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, ...@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
* is a requirement of the hardware. It is recommended to setup CMA * is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB. * for BAR sizes equal or greater than 4MB.
*/ */
if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) { if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr); &mw->dma_addr);
ntb_free_mw(nt, num_mw); ntb_free_mw(nt, num_mw);
...@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < mw_count; i++) { for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i]; mw = &nt->mw_vec[i];
rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
&mw->xlat_align_size, NULL);
if (rc)
goto err1;
rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
&mw->phys_size); &mw->phys_size);
if (rc) if (rc)
......
...@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)") ...@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
struct perf_mw { struct perf_mw {
phys_addr_t phys_addr; phys_addr_t phys_addr;
resource_size_t phys_size; resource_size_t phys_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
void __iomem *vbase; void __iomem *vbase;
size_t xlat_size; size_t xlat_size;
size_t buf_size; size_t buf_size;
...@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) ...@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{ {
struct perf_mw *mw = &perf->mw; struct perf_mw *mw = &perf->mw;
size_t xlat_size, buf_size; size_t xlat_size, buf_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
int rc; int rc;
if (!size) if (!size)
return -EINVAL; return -EINVAL;
xlat_size = round_up(size, mw->xlat_align_size); rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
buf_size = round_up(size, mw->xlat_align); &xlat_align_size, NULL);
if (rc)
return rc;
xlat_size = round_up(size, xlat_align_size);
buf_size = round_up(size, xlat_align);
if (mw->xlat_size == xlat_size) if (mw->xlat_size == xlat_size)
return 0; return 0;
...@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf) ...@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
mw = &perf->mw; mw = &perf->mw;
rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
&mw->xlat_align_size, NULL);
if (rc)
return rc;
rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size); rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
if (rc) if (rc)
return rc; return rc;
......
...@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, ...@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
phys_addr_t base; phys_addr_t base;
resource_size_t mw_size; resource_size_t mw_size;
resource_size_t align_addr; resource_size_t align_addr = 0;
resource_size_t align_size; resource_size_t align_size = 0;
resource_size_t max_size; resource_size_t max_size = 0;
buf_size = min_t(size_t, size, 512); buf_size = min_t(size_t, size, 512);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
* *
*/ */
#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h> #include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -20,8 +21,6 @@ ...@@ -20,8 +21,6 @@
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h> #include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
...@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644); ...@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
static dev_t switchtec_devt; static dev_t switchtec_devt;
static struct class *switchtec_class;
static DEFINE_IDA(switchtec_minor_ida); static DEFINE_IDA(switchtec_minor_ida);
#define MICROSEMI_VENDOR_ID 0x11f8 struct class *switchtec_class;
#define MICROSEMI_NTB_CLASSCODE 0x068000 EXPORT_SYMBOL_GPL(switchtec_class);
#define MICROSEMI_MGMT_CLASSCODE 0x058000
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
#define SWITCHTEC_EVENT_EN_LOG BIT(1)
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
struct mrpc_regs {
u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u32 cmd;
u32 status;
u32 ret_value;
} __packed;
enum mrpc_status {
SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
SWITCHTEC_MRPC_STATUS_DONE = 2,
SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
struct sw_event_regs {
u64 event_report_ctrl;
u64 reserved1;
u64 part_event_bitmap;
u64 reserved2;
u32 global_summary;
u32 reserved3[3];
u32 stack_error_event_hdr;
u32 stack_error_event_data;
u32 reserved4[4];
u32 ppu_error_event_hdr;
u32 ppu_error_event_data;
u32 reserved5[4];
u32 isp_error_event_hdr;
u32 isp_error_event_data;
u32 reserved6[4];
u32 sys_reset_event_hdr;
u32 reserved7[5];
u32 fw_exception_hdr;
u32 reserved8[5];
u32 fw_nmi_hdr;
u32 reserved9[5];
u32 fw_non_fatal_hdr;
u32 reserved10[5];
u32 fw_fatal_hdr;
u32 reserved11[5];
u32 twi_mrpc_comp_hdr;
u32 twi_mrpc_comp_data;
u32 reserved12[4];
u32 twi_mrpc_comp_async_hdr;
u32 twi_mrpc_comp_async_data;
u32 reserved13[4];
u32 cli_mrpc_comp_hdr;
u32 cli_mrpc_comp_data;
u32 reserved14[4];
u32 cli_mrpc_comp_async_hdr;
u32 cli_mrpc_comp_async_data;
u32 reserved15[4];
u32 gpio_interrupt_hdr;
u32 gpio_interrupt_data;
u32 reserved16[4];
} __packed;
enum {
SWITCHTEC_CFG0_RUNNING = 0x04,
SWITCHTEC_CFG1_RUNNING = 0x05,
SWITCHTEC_IMG0_RUNNING = 0x03,
SWITCHTEC_IMG1_RUNNING = 0x07,
};
struct sys_info_regs {
u32 device_id;
u32 device_version;
u32 firmware_version;
u32 reserved1;
u32 vendor_table_revision;
u32 table_format_version;
u32 partition_id;
u32 cfg_file_fmt_version;
u16 cfg_running;
u16 img_running;
u32 reserved2[57];
char vendor_id[8];
char product_id[16];
char product_revision[4];
char component_vendor[8];
u16 component_id;
u8 component_revision;
} __packed;
struct flash_info_regs {
u32 flash_part_map_upd_idx;
struct active_partition_info {
u32 address;
u32 build_version;
u32 build_string;
} active_img;
struct active_partition_info active_cfg;
struct active_partition_info inactive_img;
struct active_partition_info inactive_cfg;
u32 flash_length;
struct partition_info {
u32 address;
u32 length;
} cfg0;
struct partition_info cfg1;
struct partition_info img0;
struct partition_info img1;
struct partition_info nvlog;
struct partition_info vendor[8];
};
struct ntb_info_regs {
u8 partition_count;
u8 partition_id;
u16 reserved1;
u64 ep_map;
u16 requester_id;
} __packed;
struct part_cfg_regs {
u32 status;
u32 state;
u32 port_cnt;
u32 usp_port_mode;
u32 usp_pff_inst_id;
u32 vep_pff_inst_id;
u32 dsp_pff_inst_id[47];
u32 reserved1[11];
u16 vep_vector_number;
u16 usp_vector_number;
u32 port_event_bitmap;
u32 reserved2[3];
u32 part_event_summary;
u32 reserved3[3];
u32 part_reset_hdr;
u32 part_reset_data[5];
u32 mrpc_comp_hdr;
u32 mrpc_comp_data[5];
u32 mrpc_comp_async_hdr;
u32 mrpc_comp_async_data[5];
u32 dyn_binding_hdr;
u32 dyn_binding_data[5];
u32 reserved4[159];
} __packed;
enum {
SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
struct pff_csr_regs {
u16 vendor_id;
u16 device_id;
u32 pci_cfg_header[15];
u32 pci_cap_region[48];
u32 pcie_cap_region[448];
u32 indirect_gas_window[128];
u32 indirect_gas_window_off;
u32 reserved[127];
u32 pff_event_summary;
u32 reserved2[3];
u32 aer_in_p2p_hdr;
u32 aer_in_p2p_data[5];
u32 aer_in_vep_hdr;
u32 aer_in_vep_data[5];
u32 dpc_hdr;
u32 dpc_data[5];
u32 cts_hdr;
u32 cts_data[5];
u32 reserved3[6];
u32 hotplug_hdr;
u32 hotplug_data[5];
u32 ier_hdr;
u32 ier_data[5];
u32 threshold_hdr;
u32 threshold_data[5];
u32 power_mgmt_hdr;
u32 power_mgmt_data[5];
u32 tlp_throttling_hdr;
u32 tlp_throttling_data[5];
u32 force_speed_hdr;
u32 force_speed_data[5];
u32 credit_timeout_hdr;
u32 credit_timeout_data[5];
u32 link_state_hdr;
u32 link_state_data[5];
u32 reserved4[174];
} __packed;
struct switchtec_dev {
struct pci_dev *pdev;
struct device dev;
struct cdev cdev;
int partition;
int partition_count;
int pff_csr_count;
char pff_local[SWITCHTEC_MAX_PFF_CSR];
void __iomem *mmio;
struct mrpc_regs __iomem *mmio_mrpc;
struct sw_event_regs __iomem *mmio_sw_event;
struct sys_info_regs __iomem *mmio_sys_info;
struct flash_info_regs __iomem *mmio_flash_info;
struct ntb_info_regs __iomem *mmio_ntb;
struct part_cfg_regs __iomem *mmio_part_cfg;
struct part_cfg_regs __iomem *mmio_part_cfg_all;
struct pff_csr_regs __iomem *mmio_pff_csr;
/*
* The mrpc mutex must be held when accessing the other
* mrpc_ fields, alive flag and stuser->state field
*/
struct mutex mrpc_mutex;
struct list_head mrpc_queue;
int mrpc_busy;
struct work_struct mrpc_work;
struct delayed_work mrpc_timeout;
bool alive;
wait_queue_head_t event_wq;
atomic_t event_cnt;
};
static struct switchtec_dev *to_stdev(struct device *dev)
{
return container_of(dev, struct switchtec_dev, dev);
}
enum mrpc_state { enum mrpc_state {
MRPC_IDLE = 0, MRPC_IDLE = 0,
...@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = { ...@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
.compat_ioctl = switchtec_dev_ioctl, .compat_ioctl = switchtec_dev_ioctl,
}; };
static void link_event_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
stdev = container_of(work, struct switchtec_dev, link_event_work);
if (stdev->link_notifier)
stdev->link_notifier(stdev);
}
static void check_link_state_events(struct switchtec_dev *stdev)
{
int idx;
u32 reg;
int count;
int occurred = 0;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
count = (reg >> 5) & 0xFF;
if (count != stdev->link_event_count[idx]) {
occurred = 1;
stdev->link_event_count[idx] = count;
}
}
if (occurred)
schedule_work(&stdev->link_event_work);
}
static void enable_link_state_events(struct switchtec_dev *stdev)
{
int idx;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_pff_csr[idx].link_state_hdr);
}
}
static void stdev_release(struct device *dev) static void stdev_release(struct device *dev)
{ {
struct switchtec_dev *stdev = to_stdev(dev); struct switchtec_dev *stdev = to_stdev(dev);
...@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) ...@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
stdev->mrpc_busy = 0; stdev->mrpc_busy = 0;
INIT_WORK(&stdev->mrpc_work, mrpc_event_work); INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work); INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
INIT_WORK(&stdev->link_event_work, link_event_work);
init_waitqueue_head(&stdev->event_wq); init_waitqueue_head(&stdev->event_wq);
atomic_set(&stdev->event_cnt, 0); atomic_set(&stdev->event_cnt, 0);
...@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) ...@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0; return 0;
if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED); hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg); iowrite32(hdr, hdr_reg);
...@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid) ...@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
for (idx = 0; idx < stdev->pff_csr_count; idx++) { for (idx = 0; idx < stdev->pff_csr_count; idx++) {
if (!stdev->pff_local[idx]) if (!stdev->pff_local[idx])
continue; continue;
count += mask_event(stdev, eid, idx); count += mask_event(stdev, eid, idx);
} }
} else { } else {
...@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev) ...@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr); iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
} }
check_link_state_events(stdev);
for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
event_count += mask_all_events(stdev, eid); event_count += mask_all_events(stdev, eid);
...@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev, ...@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev; struct switchtec_dev *stdev;
int rc; int rc;
if (pdev->class == MICROSEMI_NTB_CLASSCODE)
request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev); stdev = stdev_create(pdev);
if (IS_ERR(stdev)) if (IS_ERR(stdev))
return PTR_ERR(stdev); return PTR_ERR(stdev);
...@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, ...@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
iowrite32(SWITCHTEC_EVENT_CLEAR | iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ, SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr); &stdev->mmio_part_cfg->mrpc_comp_hdr);
enable_link_state_events(stdev);
rc = cdev_device_add(&stdev->cdev, &stdev->dev); rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc) if (rc)
......
...@@ -70,6 +70,7 @@ struct pci_dev; ...@@ -70,6 +70,7 @@ struct pci_dev;
* @NTB_TOPO_SEC: On secondary side of remote ntb. * @NTB_TOPO_SEC: On secondary side of remote ntb.
* @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
* @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
* @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
*/ */
enum ntb_topo { enum ntb_topo {
NTB_TOPO_NONE = -1, NTB_TOPO_NONE = -1,
...@@ -77,6 +78,7 @@ enum ntb_topo { ...@@ -77,6 +78,7 @@ enum ntb_topo {
NTB_TOPO_SEC, NTB_TOPO_SEC,
NTB_TOPO_B2B_USD, NTB_TOPO_B2B_USD,
NTB_TOPO_B2B_DSD, NTB_TOPO_B2B_DSD,
NTB_TOPO_SWITCH,
}; };
static inline int ntb_topo_is_b2b(enum ntb_topo topo) static inline int ntb_topo_is_b2b(enum ntb_topo topo)
...@@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo) ...@@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
} }
return "NTB_TOPO_INVALID"; return "NTB_TOPO_INVALID";
} }
...@@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb) ...@@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
* Hardware and topology may support a different number of memory windows. * Hardware and topology may support a different number of memory windows.
* Moreover different peer devices can support different number of memory * Moreover different peer devices can support different number of memory
* windows. Simply speaking this method returns the number of possible inbound * windows. Simply speaking this method returns the number of possible inbound
* memory windows to share with specified peer device. * memory windows to share with specified peer device. Note: this may return
* zero if the link is not up yet.
* *
* Return: the number of memory windows. * Return: the number of memory windows.
*/ */
...@@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx) ...@@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
* Get the alignments of an inbound memory window with specified index. * Get the alignments of an inbound memory window with specified index.
* NULL may be given for any output parameter if the value is not needed. * NULL may be given for any output parameter if the value is not needed.
* The alignment and size parameters may be used for allocation of proper * The alignment and size parameters may be used for allocation of proper
* shared memory. * shared memory. Note: this must only be called when the link is up.
* *
* Return: Zero on success, otherwise a negative error number. * Return: Zero on success, otherwise a negative error number.
*/ */
...@@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, ...@@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *size_align, resource_size_t *size_align,
resource_size_t *size_max) resource_size_t *size_max)
{ {
if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
return -ENOTCONN;
return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
size_max); size_max);
} }
......
/*
* Microsemi Switchtec PCIe Driver
* Copyright (c) 2017, Microsemi Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _SWITCHTEC_H
#define _SWITCHTEC_H
#include <linux/pci.h>
#include <linux/cdev.h>
/* PCI identifiers for Microsemi Switchtec devices */
#define MICROSEMI_VENDOR_ID         0x11f8
#define MICROSEMI_NTB_CLASSCODE     0x068000
#define MICROSEMI_MGMT_CLASSCODE    0x058000

#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48

/* Common bits of the per-event header registers.  Note OCCURRED and
 * CLEAR share bit 0: reading it reports the event, writing it clears. */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR    BIT(0)
#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
#define SWITCHTEC_EVENT_FATAL    BIT(4)

/* Byte offsets of the register regions within the GAS register space */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
};
/* MRPC (management RPC) mailbox register layout */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;
	u32 ret_value;
} __packed;

/* Values of the mrpc_regs status register */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
/* Switch-wide event registers: each event has a header word
 * (SWITCHTEC_EVENT_* bits) followed by optional data words */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;

/* Values reported in sys_info_regs cfg_running / img_running */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};
/* Read-only system information: device/firmware versions, partition
 * id, and product identification strings */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u16 cfg_running;
	u16 img_running;
	u32 reserved2[57];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;

/* Flash partition map: active/inactive image and config locations
 * plus the fixed partitions within the device flash */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};
/* Byte offsets of the sub-regions inside the NTB GAS region */
enum {
	SWITCHTEC_NTB_REG_INFO_OFFSET   = 0x0000,
	SWITCHTEC_NTB_REG_CTRL_OFFSET   = 0x4000,
	SWITCHTEC_NTB_REG_DBMSG_OFFSET  = 0x64000,
};

/* NTB topology info: partition count/id, endpoint map and the
 * requester ID used for the req-id table */
struct ntb_info_regs {
	u8  partition_count;
	u8  partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;

/* Per-partition configuration: port/instance ids, interrupt vector
 * numbers and per-event header/data register pairs */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;
/* NTB partition controller: operation codes (partition_op), status
 * values (partition_status), and control bits for BAR/req-id/LUT
 * entries and partition_ctrl */
enum {
	NTB_CTRL_PART_OP_LOCK = 0x1,
	NTB_CTRL_PART_OP_CFG = 0x2,
	NTB_CTRL_PART_OP_RESET = 0x3,

	NTB_CTRL_PART_STATUS_NORMAL = 0x1,
	NTB_CTRL_PART_STATUS_LOCKED = 0x2,
	NTB_CTRL_PART_STATUS_LOCKING = 0x3,
	NTB_CTRL_PART_STATUS_CONFIGURING = 0x4,
	NTB_CTRL_PART_STATUS_RESETTING = 0x5,

	NTB_CTRL_BAR_VALID = 1 << 0,
	NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4,
	NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5,

	NTB_CTRL_REQ_ID_EN = 1 << 0,

	NTB_CTRL_LUT_EN = 1 << 0,

	NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};

/* NTB partition controller register block */
struct ntb_ctrl_regs {
	u32 partition_status;
	u32 partition_op;
	u32 partition_ctrl;
	u32 bar_setup;
	u32 bar_error;
	u16 lut_table_entries;
	u16 lut_table_offset;
	u32 lut_error;
	u16 req_id_table_size;
	u16 req_id_table_offset;
	u32 req_id_error;
	u32 reserved1[7];
	struct {
		u32 ctl;
		u32 win_size;
		u64 xlate_addr;
	} bar_entry[6];
	u32 reserved2[216];
	u32 req_id_table[256];
	u32 reserved3[512];
	u64 lut_entry[512];
} __packed;

/* Bits in the 64-bit incoming message (imsg) registers */
#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32)
#define NTB_DBMSG_IMSG_MASK   BIT_ULL(40)
struct ntb_dbmsg_regs {
u32 reserved1[1024];
u64 odb;
u64 odb_mask;
u64 idb;
u64 idb_mask;
u8 idb_vec_map[64];
u32 msg_map;
u32 reserved2;
struct {
u32 msg;
u32 status;
} omsg[4];
struct {
u32 msg;
u8 status;
u8 mask;
u8 src;
u8 reserved;
} imsg[4];
u8 reserved3[3928];
u8 msix_table[1024];
u8 reserved4[3072];
u8 pba[24];
u8 reserved5[4072];
} __packed;
/*
 * Partition event bits; one per event header/data register group in
 * struct part_cfg_regs (part_reset_hdr, mrpc_comp_hdr,
 * mrpc_comp_async_hdr and dyn_binding_hdr respectively).
 */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
/*
 * Per-PFF (PCI Function Framework) CSR register block.
 *
 * Hardware-defined layout (__packed); do not reorder fields.  The
 * block begins with a view of the function's PCI configuration space
 * (vendor/device ID, config header, capability regions) followed by a
 * set of per-PFF event header/data register groups.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;	/* summary of pending PFF events */
	u32 reserved2[3];
	/* Event header/data register groups, one per event type */
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
struct switchtec_ntb;

/*
 * Per-device state for a Switchtec management interface.
 *
 * Embeds the character device (cdev) and a struct device (dev) that
 * together provide the /dev interface; the mmio_* pointers are typed
 * views into the device's memory-mapped register space.
 */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;	/* char device backing the /dev node */

	int partition;		/* partition this interface belongs to */
	int partition_count;
	int pff_csr_count;
	char pff_local[SWITCHTEC_MAX_PFF_CSR];	/* flags PFFs local to us */

	/* Typed views into the memory-mapped register space */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;
	struct part_cfg_regs __iomem *mmio_part_cfg_all;
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;	/* queued MRPC commands */
	int mrpc_busy;
	struct work_struct mrpc_work;
	struct delayed_work mrpc_timeout;
	bool alive;

	wait_queue_head_t event_wq;	/* woken on each event */
	atomic_t event_cnt;

	struct work_struct link_event_work;
	/* Optional upper-layer hook invoked on link events */
	void (*link_notifier)(struct switchtec_dev *stdev);
	u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];

	struct switchtec_ntb *sndev;	/* upper-layer NTB driver state */
};
/* Return the switchtec_dev that embeds the given struct device. */
static inline struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}
/* Exported for use by upper-layer drivers (e.g. switchtec_ntb). */
extern struct class *switchtec_class;

#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment