Commit 5be31686 authored by Bjorn Helgaas, committed by Bjorn Helgaas

Merge branch 'pci/enumeration' into next

* pci/enumeration:
  RDMA/qedr: Use pci_enable_atomic_ops_to_root()
  PCI: Add pci_enable_atomic_ops_to_root()
  PCI: Make PCI_SCAN_ALL_PCIE_DEVS work for Root as well as Downstream Ports
parents 6b290397 20c3ff61
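Note: the new helper enables PCIe AtomicOp requests only when every bridge between the endpoint and the Root Port can route them. A minimal sketch of how an endpoint driver might adopt it follows; the driver and probe function names are hypothetical, and only pci_enable_atomic_ops_to_root() itself comes from this series.

#include <linux/pci.h>

/* Hypothetical endpoint driver: ask for 64-bit AtomicOp completion on
 * the path to the Root Port.  On failure the helper returns a negative
 * errno without setting the device's AtomicOp Requester Enable bit, so
 * the driver simply falls back to non-atomic operation.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        if (pci_enable_atomic_ops_to_root(pdev,
                                          PCI_EXP_DEVCAP2_ATOMIC_COMP64))
                dev_info(&pdev->dev, "PCIe AtomicOps unavailable\n");

        return 0;
}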
@@ -430,59 +430,16 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
 static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
 {
-        struct pci_dev *bridge;
-        u32 ctl2, cap2;
-        u16 flags;
-        int rc;
-
-        bridge = pdev->bus->self;
-        if (!bridge)
-                goto disable;
-
-        /* Check atomic routing support all the way to root complex */
-        while (bridge->bus->parent) {
-                rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-                if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-                        goto disable;
-
-                rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-                if (rc)
-                        goto disable;
+        int rc = pci_enable_atomic_ops_to_root(pdev,
+                                               PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 
-                rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
-                if (rc)
-                        goto disable;
-
-                if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
-                    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
-                        goto disable;
-                bridge = bridge->bus->parent->self;
+        if (rc) {
+                dev->atomic_cap = IB_ATOMIC_NONE;
+                DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+        } else {
+                dev->atomic_cap = IB_ATOMIC_GLOB;
+                DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
         }
-
-        rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-        if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-                goto disable;
-
-        rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-        if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
-                goto disable;
-
-        /* Set atomic operations */
-        pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
-                                 PCI_EXP_DEVCTL2_ATOMIC_REQ);
-        dev->atomic_cap = IB_ATOMIC_GLOB;
-        DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
-        return;
-
-disable:
-        pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
-                                   PCI_EXP_DEVCTL2_ATOMIC_REQ);
-        dev->atomic_cap = IB_ATOMIC_NONE;
-        DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
 }
 
 static const struct qed_rdma_ops *qed_ops;
@@ -3065,6 +3065,81 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
         return 0;
 }
 
+/**
+ * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
+ * @dev: the PCI device
+ * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
+ *        PCI_EXP_DEVCAP2_ATOMIC_COMP32
+ *        PCI_EXP_DEVCAP2_ATOMIC_COMP64
+ *        PCI_EXP_DEVCAP2_ATOMIC_COMP128
+ *
+ * Return 0 if all upstream bridges support AtomicOp routing, egress
+ * blocking is disabled on all upstream ports, and the root port supports
+ * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
+ * AtomicOp completion), or negative otherwise.
+ */
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
+{
+        struct pci_bus *bus = dev->bus;
+        struct pci_dev *bridge;
+        u32 cap, ctl2;
+
+        if (!pci_is_pcie(dev))
+                return -EINVAL;
+
+        /*
+         * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
+         * AtomicOp requesters.  For now, we only support endpoints as
+         * requesters and root ports as completers.  No endpoints as
+         * completers, and no peer-to-peer.
+         */
+        switch (pci_pcie_type(dev)) {
+        case PCI_EXP_TYPE_ENDPOINT:
+        case PCI_EXP_TYPE_LEG_END:
+        case PCI_EXP_TYPE_RC_END:
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        while (bus->parent) {
+                bridge = bus->self;
+
+                pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+
+                switch (pci_pcie_type(bridge)) {
+                /* Ensure switch ports support AtomicOp routing */
+                case PCI_EXP_TYPE_UPSTREAM:
+                case PCI_EXP_TYPE_DOWNSTREAM:
+                        if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+                                return -EINVAL;
+                        break;
+
+                /* Ensure root port supports all the sizes we care about */
+                case PCI_EXP_TYPE_ROOT_PORT:
+                        if ((cap & cap_mask) != cap_mask)
+                                return -EINVAL;
+                        break;
+                }
+
+                /* Ensure upstream ports don't block AtomicOps on egress */
+                if (!bridge->has_secondary_link) {
+                        pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
+                                                   &ctl2);
+                        if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
+                                return -EINVAL;
+                }
+
+                bus = bus->parent;
+        }
+
+        pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+                                 PCI_EXP_DEVCTL2_ATOMIC_REQ);
+        return 0;
+}
+EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
+
 /**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
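Because the Root Port check above is `(cap & cap_mask) != cap_mask`, a caller must OR together every completion size it intends to use, and a request for more sizes than the path supports fails outright. One workable pattern is a tiered request; this fragment is a hypothetical illustration, not part of the series:

        u32 want = PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                   PCI_EXP_DEVCAP2_ATOMIC_COMP64;
        int rc;

        /* Prefer both 32- and 64-bit AtomicOp completion; if the Root
         * Port only advertises 64-bit, retry with the smaller mask.
         * Nothing is enabled on the device until a call succeeds.
         */
        rc = pci_enable_atomic_ops_to_root(pdev, want);
        if (rc)
                rc = pci_enable_atomic_ops_to_root(pdev,
                                                   PCI_EXP_DEVCAP2_ATOMIC_COMP64);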
@@ -2248,22 +2248,27 @@ static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
 static int only_one_child(struct pci_bus *bus)
 {
-        struct pci_dev *parent = bus->self;
+        struct pci_dev *bridge = bus->self;
 
-        if (!parent || !pci_is_pcie(parent))
+        /*
+         * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
+         * we scan for all possible devices, not just Device 0.
+         */
+        if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
                 return 0;
-        if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
-                return 1;
 
         /*
-         * PCIe downstream ports are bridges that normally lead to only a
-         * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
-         * possible devices, not just device 0.  See PCIe spec r3.0,
-         * sec 7.3.1.
+         * A PCIe Downstream Port normally leads to a Link with only Device
+         * 0 on it (PCIe spec r3.1, sec 7.3.1).  As an optimization, scan
+         * only for Device 0 in that situation.
+         *
+         * Checking has_secondary_link is a hack to identify Downstream
+         * Ports because sometimes Switches are configured such that the
+         * PCIe Port Type labels are backwards.
         */
-        if (parent->has_secondary_link &&
-            !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
+        if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
                 return 1;
+
         return 0;
 }
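For context, platforms that legitimately have several devices behind one port (paravirtualized PCI front-ends, some unusual root complexes) set this flag before enumeration. A sketch of the opt-in follows, with a hypothetical init function; pci_add_flags() is the existing interface for adjusting the global PCI flags:

#include <linux/pci.h>

static int __init foo_platform_pci_init(void)
{
        /* Ask the core to probe all 32 device numbers below each PCIe
         * port rather than only Device 0.  This must run before the
         * root buses are scanned.
         */
        pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
        return 0;
}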
@@ -2063,6 +2063,7 @@ void pci_request_acs(void);
 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 bool pci_acs_path_enabled(struct pci_dev *start,
                           struct pci_dev *end, u16 acs_flags);
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
 
 #define PCI_VPD_LRDT                 0x80   /* Large Resource Data Type */
 #define PCI_VPD_LRDT_ID(x)           ((x) | PCI_VPD_LRDT)
@@ -624,7 +624,9 @@
 #define PCI_EXP_DEVCAP2                36      /* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI           0x00000020 /* Alternative Routing-ID */
 #define  PCI_EXP_DEVCAP2_ATOMIC_ROUTE  0x00000040 /* Atomic Op routing */
-#define PCI_EXP_DEVCAP2_ATOMIC_COMP64  0x00000100 /* Atomic 64-bit compare */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */
 #define  PCI_EXP_DEVCAP2_LTR           0x00000800 /* Latency tolerance reporting */
 #define  PCI_EXP_DEVCAP2_OBFF_MASK     0x000c0000 /* OBFF support mechanism */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG      0x00040000 /* New message signaling */
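The new definitions decode bits 7-9 of the Device Capabilities 2 register. As a hedged illustration (the helper name is hypothetical; pcie_capability_read_dword() is the existing accessor), a driver could report what a port advertises:

static void foo_show_atomic_caps(struct pci_dev *port)
{
        u32 cap;

        /* DEVCAP2 advertises which AtomicOp completion sizes this port
         * handles as a completer.
         */
        pcie_capability_read_dword(port, PCI_EXP_DEVCAP2, &cap);
        dev_info(&port->dev, "AtomicOp completion:%s%s%s\n",
                 (cap & PCI_EXP_DEVCAP2_ATOMIC_COMP32) ? " 32-bit" : "",
                 (cap & PCI_EXP_DEVCAP2_ATOMIC_COMP64) ? " 64-bit" : "",
                 (cap & PCI_EXP_DEVCAP2_ATOMIC_COMP128) ? " 128-bit" : "");
}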