Commit c8a0739b authored by Linus Torvalds

Merge tag 'ntb-4.15' of git://github.com/jonmason/ntb

Pull ntb updates from Jon Mason:
 "Support for the switchtec ntb and related changes. Also, a couple of
  bug fixes"

[ The timing isn't great. I had asked people to send me pull requests
  before my family vacation, and this code has not even been in
  linux-next as far as I can tell. But Logan Gunthorpe pleaded for its
  inclusion because the Switchtec driver has apparently been around for
  a while, just never in linux-next - Linus ]

* tag 'ntb-4.15' of git://github.com/jonmason/ntb:
  ntb: intel: remove b2b memory window workaround for Skylake NTB
  NTB: make idt_89hpes_cfg const
  NTB: switchtec_ntb: Update switchtec documentation with notes for NTB
  NTB: switchtec_ntb: Add memory window support
  NTB: switchtec_ntb: Implement scratchpad registers
  NTB: switchtec_ntb: Implement doorbell registers
  NTB: switchtec_ntb: Add link management
  NTB: switchtec_ntb: Add skeleton NTB driver
  NTB: switchtec_ntb: Initialize hardware for doorbells and messages
  NTB: switchtec_ntb: Initialize hardware for memory windows
  NTB: switchtec_ntb: Introduce initial NTB driver
  NTB: Add check and comment for link up to mw_count() and mw_get_align()
  NTB: Ensure ntb_mw_get_align() is only called when the link is up
  NTB: switchtec: Add link event notifier callback
  NTB: switchtec: Add NTB hardware register definitions
  NTB: switchtec: Export class symbol for use in upper layer driver
  NTB: switchtec: Move structure definitions into a common header
  ntb: update maintainer list for Intel NTB driver
parents 020aae3e 4201a991
@@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device:
between PCI Function Framework number (used by the event system)
and Switchtec Logic Port ID and Partition number (which is more
user friendly).
Non-Transparent Bridge (NTB) Driver
===================================
An NTB driver is provided for the switchtec hardware in switchtec_ntb.
Currently, it only supports switches configured with exactly 2
partitions. It also requires the following configuration settings:
* Both partitions must be able to access each other's GAS spaces.
Thus, the bits in the GAS Access Vector under Management Settings
must be set to support this.
@@ -9726,12 +9726,11 @@ S: Supported
F: drivers/ntb/hw/idt/
NTB INTEL DRIVER
-M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-ntb@googlegroups.com
S: Supported
-W: https://github.com/jonmason/ntb/wiki
-T: git git://github.com/jonmason/ntb.git
+W: https://github.com/davejiang/linux/wiki
+T: git https://github.com/davejiang/linux.git
F: drivers/ntb/hw/intel/
NTFS FILESYSTEM
@@ -10443,6 +10442,8 @@ F: Documentation/switchtec.txt
F: Documentation/ABI/testing/sysfs-class-switchtec
F: drivers/pci/switch/switchtec*
F: include/uapi/linux/switchtec_ioctl.h
+F: include/linux/switchtec.h
+F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
......
source "drivers/ntb/hw/amd/Kconfig" source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig" source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig" source "drivers/ntb/hw/intel/Kconfig"
source "drivers/ntb/hw/mscc/Kconfig"
obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
/*
* IDT PCIe-switch models ports configuration structures
*/
-static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
.name = "89HPES24NT6AG2",
.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
.name = "89HPES32NT8AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
.name = "89HPES32NT8BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
.name = "89HPES12NT12G2",
.port_cnt = 3, .ports = {0, 8, 16}
};
-static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
.name = "89HPES16NT16G2",
.port_cnt = 4, .ports = {0, 8, 12, 16}
};
-static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
.name = "89HPES24NT24G2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
.name = "89HPES32NT24AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
.name = "89HPES32NT24BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
......
@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
{
struct pci_dev *pdev;
void __iomem *mmio;
resource_size_t bar_size;
phys_addr_t bar_addr;
int b2b_bar;
u8 bar_sz;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
} else {
b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
if (b2b_bar < 0)
return -EIO;
dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
dev_dbg(&pdev->dev, "b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1;
} else if (bar_size >= XEON_B2B_MIN_SIZE) {
dev_dbg(&pdev->dev, "b2b using whole bar\n");
ndev->b2b_off = 0;
--ndev->mw_count;
} else {
dev_dbg(&pdev->dev, "b2b bar size is too small\n");
return -EIO;
}
}
/*
* Reset the secondary bar sizes to match the primary bar sizes,
* except disable or halve the size of the b2b secondary bar.
*/
pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
if (b2b_bar == 1) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
if (b2b_bar == 2) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
/* SBAR01 hit by first part of the b2b bar */
if (b2b_bar == 0)
bar_addr = addr->bar0_addr;
else if (b2b_bar == 1)
bar_addr = addr->bar2_addr64;
else if (b2b_bar == 2)
bar_addr = addr->bar4_addr64;
else
return -EIO;
/* setup incoming bar limits == base addrs (zero length windows) */
-bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
-bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
......
config NTB_SWITCHTEC
tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
select PCI_SW_SWITCHTEC
help
Enables NTB support for Switchtec PCI switches. This also
selects the Switchtec management driver as they share the same
hardware interface.
If unsure, say N.
obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
@@ -191,8 +191,6 @@ struct ntb_transport_qp {
struct ntb_transport_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
-resource_size_t xlat_align;
-resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buff_size;
@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
size_t xlat_size, buff_size;
+resource_size_t xlat_align;
+resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
-xlat_size = round_up(size, mw->xlat_align_size);
-buff_size = round_up(size, mw->xlat_align);
+rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
+&xlat_align_size, NULL);
+if (rc)
+return rc;
+xlat_size = round_up(size, xlat_align_size);
+buff_size = round_up(size, xlat_align);
/* No need to re-setup */
if (mw->xlat_size == xlat_size)
@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
* is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB.
*/
-if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr);
ntb_free_mw(nt, num_mw);
@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
-rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
-&mw->xlat_align_size, NULL);
-if (rc)
-goto err1;
rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
&mw->phys_size);
if (rc)
......
@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
struct perf_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
-resource_size_t xlat_align;
-resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buf_size;
@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
struct perf_mw *mw = &perf->mw;
size_t xlat_size, buf_size;
+resource_size_t xlat_align;
+resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
-xlat_size = round_up(size, mw->xlat_align_size);
-buf_size = round_up(size, mw->xlat_align);
+rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
+&xlat_align_size, NULL);
+if (rc)
+return rc;
+xlat_size = round_up(size, xlat_align_size);
+buf_size = round_up(size, xlat_align);
if (mw->xlat_size == xlat_size)
return 0;
@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
mw = &perf->mw;
-rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
-&mw->xlat_align_size, NULL);
-if (rc)
-return rc;
rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
if (rc)
return rc;
......
@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
phys_addr_t base;
resource_size_t mw_size;
-resource_size_t align_addr;
-resource_size_t align_size;
-resource_size_t max_size;
+resource_size_t align_addr = 0;
+resource_size_t align_size = 0;
+resource_size_t max_size = 0;
buf_size = min_t(size_t, size, 512);
......
@@ -13,6 +13,7 @@
*
*/
+#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h>
@@ -20,8 +21,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
#include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
static dev_t switchtec_devt;
-static struct class *switchtec_class;
static DEFINE_IDA(switchtec_minor_ida);
+struct class *switchtec_class;
+EXPORT_SYMBOL_GPL(switchtec_class);
#define MICROSEMI_VENDOR_ID 0x11f8
#define MICROSEMI_NTB_CLASSCODE 0x068000
#define MICROSEMI_MGMT_CLASSCODE 0x058000
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
#define SWITCHTEC_EVENT_EN_LOG BIT(1)
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
struct mrpc_regs {
u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u32 cmd;
u32 status;
u32 ret_value;
} __packed;
enum mrpc_status {
SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
SWITCHTEC_MRPC_STATUS_DONE = 2,
SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
struct sw_event_regs {
u64 event_report_ctrl;
u64 reserved1;
u64 part_event_bitmap;
u64 reserved2;
u32 global_summary;
u32 reserved3[3];
u32 stack_error_event_hdr;
u32 stack_error_event_data;
u32 reserved4[4];
u32 ppu_error_event_hdr;
u32 ppu_error_event_data;
u32 reserved5[4];
u32 isp_error_event_hdr;
u32 isp_error_event_data;
u32 reserved6[4];
u32 sys_reset_event_hdr;
u32 reserved7[5];
u32 fw_exception_hdr;
u32 reserved8[5];
u32 fw_nmi_hdr;
u32 reserved9[5];
u32 fw_non_fatal_hdr;
u32 reserved10[5];
u32 fw_fatal_hdr;
u32 reserved11[5];
u32 twi_mrpc_comp_hdr;
u32 twi_mrpc_comp_data;
u32 reserved12[4];
u32 twi_mrpc_comp_async_hdr;
u32 twi_mrpc_comp_async_data;
u32 reserved13[4];
u32 cli_mrpc_comp_hdr;
u32 cli_mrpc_comp_data;
u32 reserved14[4];
u32 cli_mrpc_comp_async_hdr;
u32 cli_mrpc_comp_async_data;
u32 reserved15[4];
u32 gpio_interrupt_hdr;
u32 gpio_interrupt_data;
u32 reserved16[4];
} __packed;
enum {
SWITCHTEC_CFG0_RUNNING = 0x04,
SWITCHTEC_CFG1_RUNNING = 0x05,
SWITCHTEC_IMG0_RUNNING = 0x03,
SWITCHTEC_IMG1_RUNNING = 0x07,
};
struct sys_info_regs {
u32 device_id;
u32 device_version;
u32 firmware_version;
u32 reserved1;
u32 vendor_table_revision;
u32 table_format_version;
u32 partition_id;
u32 cfg_file_fmt_version;
u16 cfg_running;
u16 img_running;
u32 reserved2[57];
char vendor_id[8];
char product_id[16];
char product_revision[4];
char component_vendor[8];
u16 component_id;
u8 component_revision;
} __packed;
struct flash_info_regs {
u32 flash_part_map_upd_idx;
struct active_partition_info {
u32 address;
u32 build_version;
u32 build_string;
} active_img;
struct active_partition_info active_cfg;
struct active_partition_info inactive_img;
struct active_partition_info inactive_cfg;
u32 flash_length;
struct partition_info {
u32 address;
u32 length;
} cfg0;
struct partition_info cfg1;
struct partition_info img0;
struct partition_info img1;
struct partition_info nvlog;
struct partition_info vendor[8];
};
struct ntb_info_regs {
u8 partition_count;
u8 partition_id;
u16 reserved1;
u64 ep_map;
u16 requester_id;
} __packed;
struct part_cfg_regs {
u32 status;
u32 state;
u32 port_cnt;
u32 usp_port_mode;
u32 usp_pff_inst_id;
u32 vep_pff_inst_id;
u32 dsp_pff_inst_id[47];
u32 reserved1[11];
u16 vep_vector_number;
u16 usp_vector_number;
u32 port_event_bitmap;
u32 reserved2[3];
u32 part_event_summary;
u32 reserved3[3];
u32 part_reset_hdr;
u32 part_reset_data[5];
u32 mrpc_comp_hdr;
u32 mrpc_comp_data[5];
u32 mrpc_comp_async_hdr;
u32 mrpc_comp_async_data[5];
u32 dyn_binding_hdr;
u32 dyn_binding_data[5];
u32 reserved4[159];
} __packed;
enum {
SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
struct pff_csr_regs {
u16 vendor_id;
u16 device_id;
u32 pci_cfg_header[15];
u32 pci_cap_region[48];
u32 pcie_cap_region[448];
u32 indirect_gas_window[128];
u32 indirect_gas_window_off;
u32 reserved[127];
u32 pff_event_summary;
u32 reserved2[3];
u32 aer_in_p2p_hdr;
u32 aer_in_p2p_data[5];
u32 aer_in_vep_hdr;
u32 aer_in_vep_data[5];
u32 dpc_hdr;
u32 dpc_data[5];
u32 cts_hdr;
u32 cts_data[5];
u32 reserved3[6];
u32 hotplug_hdr;
u32 hotplug_data[5];
u32 ier_hdr;
u32 ier_data[5];
u32 threshold_hdr;
u32 threshold_data[5];
u32 power_mgmt_hdr;
u32 power_mgmt_data[5];
u32 tlp_throttling_hdr;
u32 tlp_throttling_data[5];
u32 force_speed_hdr;
u32 force_speed_data[5];
u32 credit_timeout_hdr;
u32 credit_timeout_data[5];
u32 link_state_hdr;
u32 link_state_data[5];
u32 reserved4[174];
} __packed;
struct switchtec_dev {
struct pci_dev *pdev;
struct device dev;
struct cdev cdev;
int partition;
int partition_count;
int pff_csr_count;
char pff_local[SWITCHTEC_MAX_PFF_CSR];
void __iomem *mmio;
struct mrpc_regs __iomem *mmio_mrpc;
struct sw_event_regs __iomem *mmio_sw_event;
struct sys_info_regs __iomem *mmio_sys_info;
struct flash_info_regs __iomem *mmio_flash_info;
struct ntb_info_regs __iomem *mmio_ntb;
struct part_cfg_regs __iomem *mmio_part_cfg;
struct part_cfg_regs __iomem *mmio_part_cfg_all;
struct pff_csr_regs __iomem *mmio_pff_csr;
/*
* The mrpc mutex must be held when accessing the other
* mrpc_ fields, alive flag and stuser->state field
*/
struct mutex mrpc_mutex;
struct list_head mrpc_queue;
int mrpc_busy;
struct work_struct mrpc_work;
struct delayed_work mrpc_timeout;
bool alive;
wait_queue_head_t event_wq;
atomic_t event_cnt;
};
static struct switchtec_dev *to_stdev(struct device *dev)
{
return container_of(dev, struct switchtec_dev, dev);
}
enum mrpc_state {
MRPC_IDLE = 0,
@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
.compat_ioctl = switchtec_dev_ioctl,
};
static void link_event_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
stdev = container_of(work, struct switchtec_dev, link_event_work);
if (stdev->link_notifier)
stdev->link_notifier(stdev);
}
static void check_link_state_events(struct switchtec_dev *stdev)
{
int idx;
u32 reg;
int count;
int occurred = 0;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
count = (reg >> 5) & 0xFF;
if (count != stdev->link_event_count[idx]) {
occurred = 1;
stdev->link_event_count[idx] = count;
}
}
if (occurred)
schedule_work(&stdev->link_event_work);
}
static void enable_link_state_events(struct switchtec_dev *stdev)
{
int idx;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_pff_csr[idx].link_state_hdr);
}
}
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
stdev->mrpc_busy = 0;
INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+INIT_WORK(&stdev->link_event_work, link_event_work);
init_waitqueue_head(&stdev->event_wq);
atomic_set(&stdev->event_cnt, 0);
@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
+if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
if (!stdev->pff_local[idx])
continue;
count += mask_event(stdev, eid, idx);
}
} else {
@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
}
+check_link_state_events(stdev);
for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
event_count += mask_all_events(stdev, eid);
@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
+if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev);
if (IS_ERR(stdev))
return PTR_ERR(stdev);
@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
+enable_link_state_events(stdev);
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
......
@@ -70,6 +70,7 @@ struct pci_dev;
* @NTB_TOPO_SEC: On secondary side of remote ntb.
* @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
* @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
+* @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
*/
enum ntb_topo {
NTB_TOPO_NONE = -1,
@@ -77,6 +78,7 @@
NTB_TOPO_SEC,
NTB_TOPO_B2B_USD,
NTB_TOPO_B2B_DSD,
+NTB_TOPO_SWITCH,
};
static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
}
return "NTB_TOPO_INVALID";
}
@@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
* Hardware and topology may support a different number of memory windows.
* Moreover different peer devices can support different number of memory
* windows. Simply speaking this method returns the number of possible inbound
-* memory windows to share with specified peer device.
+* memory windows to share with specified peer device. Note: this may return
+* zero if the link is not up yet.
*
* Return: the number of memory windows.
*/
@@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
* Get the alignments of an inbound memory window with specified index.
* NULL may be given for any output parameter if the value is not needed.
* The alignment and size parameters may be used for allocation of proper
-* shared memory.
+* shared memory. Note: this must only be called when the link is up.
*
* Return: Zero on success, otherwise a negative error number.
*/
@@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *size_align,
resource_size_t *size_max)
{
+if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
+return -ENOTCONN;
return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
size_max);
}
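Illustrative sketch (editorial addition, not part of this merge): the new link-up check in ntb_mw_get_align() above implies that a client driver should query window alignment only after the link to the peer has come up, for example from its link event callback, which is how ntb_transport and ntb_perf are reworked earlier in this series. The helper name example_setup_peer_mw() and its pidx/widx parameters below are hypothetical.

/* Illustrative only: an NTB client querying window alignment once the
 * link to peer 'pidx' is reported up, mirroring the check added above. */
static int example_setup_peer_mw(struct ntb_dev *ntb, int pidx, int widx)
{
	resource_size_t addr_align, size_align, size_max;
	int rc;

	/* Bail out until link-up; retry from the link event callback. */
	if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
		return -ENOTCONN;

	rc = ntb_mw_get_align(ntb, pidx, widx, &addr_align,
			      &size_align, &size_max);
	if (rc)
		return rc;

	/* Allocate the inbound buffer rounded up to size_align and aligned
	 * to addr_align before programming the window translation. */
	return 0;
}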
......
/*
* Microsemi Switchtec PCIe Driver
* Copyright (c) 2017, Microsemi Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _SWITCHTEC_H
#define _SWITCHTEC_H
#include <linux/pci.h>
#include <linux/cdev.h>
#define MICROSEMI_VENDOR_ID 0x11f8
#define MICROSEMI_NTB_CLASSCODE 0x068000
#define MICROSEMI_MGMT_CLASSCODE 0x058000
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
#define SWITCHTEC_EVENT_EN_LOG BIT(1)
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
struct mrpc_regs {
u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u32 cmd;
u32 status;
u32 ret_value;
} __packed;
enum mrpc_status {
SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
SWITCHTEC_MRPC_STATUS_DONE = 2,
SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
struct sw_event_regs {
u64 event_report_ctrl;
u64 reserved1;
u64 part_event_bitmap;
u64 reserved2;
u32 global_summary;
u32 reserved3[3];
u32 stack_error_event_hdr;
u32 stack_error_event_data;
u32 reserved4[4];
u32 ppu_error_event_hdr;
u32 ppu_error_event_data;
u32 reserved5[4];
u32 isp_error_event_hdr;
u32 isp_error_event_data;
u32 reserved6[4];
u32 sys_reset_event_hdr;
u32 reserved7[5];
u32 fw_exception_hdr;
u32 reserved8[5];
u32 fw_nmi_hdr;
u32 reserved9[5];
u32 fw_non_fatal_hdr;
u32 reserved10[5];
u32 fw_fatal_hdr;
u32 reserved11[5];
u32 twi_mrpc_comp_hdr;
u32 twi_mrpc_comp_data;
u32 reserved12[4];
u32 twi_mrpc_comp_async_hdr;
u32 twi_mrpc_comp_async_data;
u32 reserved13[4];
u32 cli_mrpc_comp_hdr;
u32 cli_mrpc_comp_data;
u32 reserved14[4];
u32 cli_mrpc_comp_async_hdr;
u32 cli_mrpc_comp_async_data;
u32 reserved15[4];
u32 gpio_interrupt_hdr;
u32 gpio_interrupt_data;
u32 reserved16[4];
} __packed;
enum {
SWITCHTEC_CFG0_RUNNING = 0x04,
SWITCHTEC_CFG1_RUNNING = 0x05,
SWITCHTEC_IMG0_RUNNING = 0x03,
SWITCHTEC_IMG1_RUNNING = 0x07,
};
struct sys_info_regs {
u32 device_id;
u32 device_version;
u32 firmware_version;
u32 reserved1;
u32 vendor_table_revision;
u32 table_format_version;
u32 partition_id;
u32 cfg_file_fmt_version;
u16 cfg_running;
u16 img_running;
u32 reserved2[57];
char vendor_id[8];
char product_id[16];
char product_revision[4];
char component_vendor[8];
u16 component_id;
u8 component_revision;
} __packed;
struct flash_info_regs {
u32 flash_part_map_upd_idx;
struct active_partition_info {
u32 address;
u32 build_version;
u32 build_string;
} active_img;
struct active_partition_info active_cfg;
struct active_partition_info inactive_img;
struct active_partition_info inactive_cfg;
u32 flash_length;
struct partition_info {
u32 address;
u32 length;
} cfg0;
struct partition_info cfg1;
struct partition_info img0;
struct partition_info img1;
struct partition_info nvlog;
struct partition_info vendor[8];
};
enum {
SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000,
SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000,
SWITCHTEC_NTB_REG_DBMSG_OFFSET = 0x64000,
};
struct ntb_info_regs {
u8 partition_count;
u8 partition_id;
u16 reserved1;
u64 ep_map;
u16 requester_id;
} __packed;
struct part_cfg_regs {
u32 status;
u32 state;
u32 port_cnt;
u32 usp_port_mode;
u32 usp_pff_inst_id;
u32 vep_pff_inst_id;
u32 dsp_pff_inst_id[47];
u32 reserved1[11];
u16 vep_vector_number;
u16 usp_vector_number;
u32 port_event_bitmap;
u32 reserved2[3];
u32 part_event_summary;
u32 reserved3[3];
u32 part_reset_hdr;
u32 part_reset_data[5];
u32 mrpc_comp_hdr;
u32 mrpc_comp_data[5];
u32 mrpc_comp_async_hdr;
u32 mrpc_comp_async_data[5];
u32 dyn_binding_hdr;
u32 dyn_binding_data[5];
u32 reserved4[159];
} __packed;
enum {
NTB_CTRL_PART_OP_LOCK = 0x1,
NTB_CTRL_PART_OP_CFG = 0x2,
NTB_CTRL_PART_OP_RESET = 0x3,
NTB_CTRL_PART_STATUS_NORMAL = 0x1,
NTB_CTRL_PART_STATUS_LOCKED = 0x2,
NTB_CTRL_PART_STATUS_LOCKING = 0x3,
NTB_CTRL_PART_STATUS_CONFIGURING = 0x4,
NTB_CTRL_PART_STATUS_RESETTING = 0x5,
NTB_CTRL_BAR_VALID = 1 << 0,
NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4,
NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5,
NTB_CTRL_REQ_ID_EN = 1 << 0,
NTB_CTRL_LUT_EN = 1 << 0,
NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};
struct ntb_ctrl_regs {
u32 partition_status;
u32 partition_op;
u32 partition_ctrl;
u32 bar_setup;
u32 bar_error;
u16 lut_table_entries;
u16 lut_table_offset;
u32 lut_error;
u16 req_id_table_size;
u16 req_id_table_offset;
u32 req_id_error;
u32 reserved1[7];
struct {
u32 ctl;
u32 win_size;
u64 xlate_addr;
} bar_entry[6];
u32 reserved2[216];
u32 req_id_table[256];
u32 reserved3[512];
u64 lut_entry[512];
} __packed;
#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32)
#define NTB_DBMSG_IMSG_MASK BIT_ULL(40)
struct ntb_dbmsg_regs {
u32 reserved1[1024];
u64 odb;
u64 odb_mask;
u64 idb;
u64 idb_mask;
u8 idb_vec_map[64];
u32 msg_map;
u32 reserved2;
struct {
u32 msg;
u32 status;
} omsg[4];
struct {
u32 msg;
u8 status;
u8 mask;
u8 src;
u8 reserved;
} imsg[4];
u8 reserved3[3928];
u8 msix_table[1024];
u8 reserved4[3072];
u8 pba[24];
u8 reserved5[4072];
} __packed;
enum {
SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
struct pff_csr_regs {
u16 vendor_id;
u16 device_id;
u32 pci_cfg_header[15];
u32 pci_cap_region[48];
u32 pcie_cap_region[448];
u32 indirect_gas_window[128];
u32 indirect_gas_window_off;
u32 reserved[127];
u32 pff_event_summary;
u32 reserved2[3];
u32 aer_in_p2p_hdr;
u32 aer_in_p2p_data[5];
u32 aer_in_vep_hdr;
u32 aer_in_vep_data[5];
u32 dpc_hdr;
u32 dpc_data[5];
u32 cts_hdr;
u32 cts_data[5];
u32 reserved3[6];
u32 hotplug_hdr;
u32 hotplug_data[5];
u32 ier_hdr;
u32 ier_data[5];
u32 threshold_hdr;
u32 threshold_data[5];
u32 power_mgmt_hdr;
u32 power_mgmt_data[5];
u32 tlp_throttling_hdr;
u32 tlp_throttling_data[5];
u32 force_speed_hdr;
u32 force_speed_data[5];
u32 credit_timeout_hdr;
u32 credit_timeout_data[5];
u32 link_state_hdr;
u32 link_state_data[5];
u32 reserved4[174];
} __packed;
struct switchtec_ntb;
struct switchtec_dev {
struct pci_dev *pdev;
struct device dev;
struct cdev cdev;
int partition;
int partition_count;
int pff_csr_count;
char pff_local[SWITCHTEC_MAX_PFF_CSR];
void __iomem *mmio;
struct mrpc_regs __iomem *mmio_mrpc;
struct sw_event_regs __iomem *mmio_sw_event;
struct sys_info_regs __iomem *mmio_sys_info;
struct flash_info_regs __iomem *mmio_flash_info;
struct ntb_info_regs __iomem *mmio_ntb;
struct part_cfg_regs __iomem *mmio_part_cfg;
struct part_cfg_regs __iomem *mmio_part_cfg_all;
struct pff_csr_regs __iomem *mmio_pff_csr;
/*
* The mrpc mutex must be held when accessing the other
* mrpc_ fields, alive flag and stuser->state field
*/
struct mutex mrpc_mutex;
struct list_head mrpc_queue;
int mrpc_busy;
struct work_struct mrpc_work;
struct delayed_work mrpc_timeout;
bool alive;
wait_queue_head_t event_wq;
atomic_t event_cnt;
struct work_struct link_event_work;
void (*link_notifier)(struct switchtec_dev *stdev);
u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];
struct switchtec_ntb *sndev;
};
static inline struct switchtec_dev *to_stdev(struct device *dev)
{
return container_of(dev, struct switchtec_dev, dev);
}
extern struct class *switchtec_class;
#endif