Commit 4cc4b932 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma updates from Doug Ledford:
 "First set of updates for 4.11 kernel merge window

   - Add new Broadcom bnxt_re RoCE driver
   - rxe driver updates
   - ioctl cleanups
   - ETH_P_IBOE declaration cleanup
   - IPoIB changes
   - Add port state cache
   - Allow srpt driver to accept guids as port names in config
   - Update to hfi1 driver
   - Update to srp driver
   - Lots of misc minor changes all over"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (114 commits)
  RDMA/bnxt_re: fix for "bnxt_en: Update to firmware interface spec 1.7.0."
  rdma_cm: fail iwarp accepts w/o connection params
  IB/srp: Drain the send queue before destroying a QP
  IB/core: Add support for draining IB_POLL_DIRECT completion queues
  IB/srp: Improve an error path
  IB/srp: Make a diagnostic message more informative
  IB/srp: Document locking conventions
  IB/srp: Fix race conditions related to task management
  IB/srp: Avoid that duplicate responses trigger a kernel bug
  IB/SRP: Avoid using IB_MR_TYPE_SG_GAPS
  RDMA/qedr: Fix some error handling
  RDMA/bnxt_re: add DCB dependency
  IB/hns: include linux/module.h
  IB/vmw_pvrdma: Expose vendor error to ULPs
  vmw_pvrdma: switch to pci_alloc_irq_vectors
  IB/hfi1: use size_t for passing array length
  IB/ipoib: Remove redudant label
  IB/ipoib: remove the unnecessary memory free
  IB/mthca: switch to pci_alloc_irq_vectors
  IB/hfi1: Code reuse with memdup_copy
  ...
parents a57eaa1f db690328
@@ -20,3 +20,11 @@ Description:	RDMA-CM based connections from HCA <hca> at port <port-num>
		will be initiated with this RoCE type as default.
		The possible RoCE types are either "IB/RoCE v1" or "RoCE v2".
		This parameter has RW access.
+
+What:		/config/rdma_cm/<hca>/ports/<port-num>/default_roce_tos
+Date:		February 7, 2017
+KernelVersion:	4.11.0
+Description:	RDMA-CM QPs from HCA <hca> at port <port-num>
+		will be created with this TOS as default.
+		This can be overridden by using the rdma_set_option API.
+		The possible RoCE TOS values are 0-255.
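The new default_roce_tos attribute only sets the per-port default; as the description notes, an application can still override the TOS per connection through the rdma_set_option API. A minimal userspace sketch using librdmacm (the helper name set_connection_tos is illustrative, not part of this series):

#include <stdint.h>
#include <stdio.h>
#include <rdma/rdma_cma.h>

/* Override the port-wide default_roce_tos for one rdma_cm_id before
 * rdma_connect(); RDMA_OPTION_ID_TOS takes a single byte, 0-255. */
static int set_connection_tos(struct rdma_cm_id *id, uint8_t tos)
{
	int ret;

	ret = rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_TOS,
			      &tos, sizeof(tos));
	if (ret)
		perror("rdma_set_option(TOS)");
	return ret;
}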
@@ -2826,6 +2826,17 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm64/boot/dts/broadcom/vulcan*

+BROADCOM NETXTREME-E ROCE DRIVER
+M:	Selvin Xavier <selvin.xavier@broadcom.com>
+M:	Devesh Sharma <devesh.sharma@broadcom.com>
+M:	Somnath Kotur <somnath.kotur@broadcom.com>
+M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+L:	linux-rdma@vger.kernel.org
+W:	http://www.broadcom.com
+S:	Supported
+F:	drivers/infiniband/hw/bnxt_re/
+F:	include/uapi/rdma/bnxt_re-abi.h
+
 BROCADE BFA FC SCSI DRIVER
 M:	Anil Gurumurthy <anil.gurumurthy@qlogic.com>
 M:	Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
...
@@ -92,4 +92,6 @@ source "drivers/infiniband/hw/hfi1/Kconfig"
 source "drivers/infiniband/hw/qedr/Kconfig"

+source "drivers/infiniband/hw/bnxt_re/Kconfig"
+
 endif # INFINIBAND
@@ -3409,6 +3409,8 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

+	pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
+			     state, ib_wc_status_msg(wc_status));
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
...
@@ -139,8 +139,50 @@ static ssize_t default_roce_mode_store(struct config_item *item,
 CONFIGFS_ATTR(, default_roce_mode);

+static ssize_t default_roce_tos_show(struct config_item *item, char *buf)
+{
+	struct cma_device *cma_dev;
+	struct cma_dev_port_group *group;
+	ssize_t ret;
+	u8 tos;
+
+	ret = cma_configfs_params_get(item, &cma_dev, &group);
+	if (ret)
+		return ret;
+
+	tos = cma_get_default_roce_tos(cma_dev, group->port_num);
+	cma_configfs_params_put(cma_dev);
+
+	return sprintf(buf, "%u\n", tos);
+}
+
+static ssize_t default_roce_tos_store(struct config_item *item,
+				      const char *buf, size_t count)
+{
+	struct cma_device *cma_dev;
+	struct cma_dev_port_group *group;
+	ssize_t ret;
+	u8 tos;
+
+	ret = kstrtou8(buf, 0, &tos);
+	if (ret)
+		return ret;
+
+	ret = cma_configfs_params_get(item, &cma_dev, &group);
+	if (ret)
+		return ret;
+
+	ret = cma_set_default_roce_tos(cma_dev, group->port_num, tos);
+	cma_configfs_params_put(cma_dev);
+
+	return ret ? ret : strnlen(buf, count);
+}
+
+CONFIGFS_ATTR(, default_roce_tos);
+
 static struct configfs_attribute *cma_configfs_attributes[] = {
	&attr_default_roce_mode,
+	&attr_default_roce_tos,
	NULL,
 };
...
@@ -62,6 +62,9 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,
 int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
+int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
+int cma_set_default_roce_tos(struct cma_device *a_dev, unsigned int port,
+			     u8 default_roce_tos);
 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

 int ib_device_register_sysfs(struct ib_device *device,
...
@@ -58,8 +58,8 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
 * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
 * context and does not ask for completion interrupts from the HCA.
 *
- * Note: for compatibility reasons -1 can be passed in %budget for unlimited
- * polling. Do not use this feature in new code, it will be removed soon.
+ * Note: do not pass -1 as %budget unless it is guaranteed that the number
+ * of completions that will be processed is small.
 */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
@@ -120,7 +120,7 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
- * specified context. The ULP needs must use wr->wr_cqe instead of wr->wr_id
+ * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
...
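A CQ allocated with ib_alloc_cq(dev, NULL, nr_cqe, 0, IB_POLL_DIRECT) is never polled by the core, so the ULP reaps completions from its own context. A hedged kernel-style sketch of that pattern (the function name is illustrative, not from this series):

static void ulp_reap_completions(struct ib_cq *cq)
{
	/* Reap in small, bounded batches; per the updated comment above,
	 * a budget of -1 is only reasonable when few completions can be
	 * pending, as in the drain path added later in this series. */
	while (ib_process_cq_direct(cq, 16) > 0)
		cond_resched();
}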
@@ -659,7 +659,7 @@ int ib_query_port(struct ib_device *device,
	union ib_gid gid;
	int err;

-	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
@@ -825,7 +825,7 @@ int ib_modify_port(struct ib_device *device,
	if (!device->modify_port)
		return -ENOSYS;

-	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
...
@@ -316,7 +316,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
-		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
+		dev_notice(&device->dev,
+			   "ib_register_mad_agent: Invalid port %d\n",
+			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
...
@@ -144,7 +144,6 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
 static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
 {
-	struct net_device *event_ndev = (struct net_device *)cookie;
	struct net_device *real_dev;
	int res;

@@ -152,11 +151,11 @@ static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
		return 0;

	rcu_read_lock();
-	real_dev = rdma_vlan_dev_real_dev(event_ndev);
+	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
-		real_dev = event_ndev;
+		real_dev = cookie;

-	res = ((rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
+	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

@@ -192,17 +191,16 @@ static int pass_all_filter(struct ib_device *ib_dev, u8 port,
 static int upper_device_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
 {
-	struct net_device *event_ndev = (struct net_device *)cookie;
	int res;

	if (!rdma_ndev)
		return 0;

-	if (rdma_ndev == event_ndev)
+	if (rdma_ndev == cookie)
		return 1;

	rcu_read_lock();
-	res = rdma_is_upper_dev_rcu(rdma_ndev, event_ndev);
+	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
@@ -379,18 +377,14 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
 static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
 {
-	struct net_device *event_ndev = (struct net_device *)cookie;
-
-	enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
-	_add_netdev_ips(ib_dev, port, event_ndev);
+	enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
+	_add_netdev_ips(ib_dev, port, cookie);
 }

 static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
 {
-	struct net_device *event_ndev = (struct net_device *)cookie;
-
-	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
+	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
 }

 static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
@@ -460,7 +454,7 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
							  u8 port,
							  struct net_device *ndev))
 {
-	struct net_device *ndev = (struct net_device *)cookie;
+	struct net_device *ndev = cookie;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);
@@ -519,9 +513,7 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
 static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
				   struct net_device *rdma_ndev, void *cookie)
 {
-	struct net_device *event_ndev = (struct net_device *)cookie;
-
-	bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
+	bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
 }

 /* The following functions operate on all IB devices. netdevice_event and
...
@@ -1205,8 +1205,7 @@ int ib_resolve_eth_dmac(struct ib_device *device,
 {
	int ret = 0;

-	if (ah_attr->port_num < rdma_start_port(device) ||
-	    ah_attr->port_num > rdma_end_port(device))
+	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
@@ -1949,17 +1948,12 @@ static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
 */
 static void __ib_drain_sq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

-	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);
@@ -1976,6 +1970,10 @@ static void __ib_drain_sq(struct ib_qp *qp)
		return;
	}

+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
		wait_for_completion(&sdrain.done);
 }

@@ -1984,17 +1982,12 @@ static void __ib_drain_sq(struct ib_qp *qp)
 */
 static void __ib_drain_rq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

-	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);
@@ -2011,6 +2004,10 @@ static void __ib_drain_rq(struct ib_qp *qp)
		return;
	}

+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
		wait_for_completion(&rdrain.done);
 }

@@ -2028,8 +2025,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
@@ -2057,8 +2053,7 @@ EXPORT_SYMBOL(ib_drain_sq);
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
@@ -2082,8 +2077,7 @@ EXPORT_SYMBOL(ib_drain_rq);
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
- * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
...
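With this change ib_drain_sq()/ib_drain_rq() also work for QPs whose CQs use IB_POLL_DIRECT, which is what allows the IB/srp patch above to drain the send queue before destroying a QP. A hedged sketch of the resulting ULP teardown pattern (the function name is illustrative):

static void ulp_teardown_qp(struct ib_qp *qp)
{
	/* Assumes the requirements listed above still hold: the device can
	 * move the QP to the ERR state, there is room for the drain WRs and
	 * completions, and nothing else is posting to the QP concurrently. */
	ib_drain_qp(qp);	/* drains both the SQ and the RQ */
	ib_destroy_qp(qp);
}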
@@ -12,3 +12,4 @@ obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
 obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
 obj-$(CONFIG_INFINIBAND_QEDR)		+= qedr/
+obj-$(CONFIG_INFINIBAND_BNXT_RE)	+= bnxt_re/
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on ETHERNET && NETDEVICES && PCI && INET && DCB
select NET_VENDOR_BROADCOM
select BNXT
---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
qplib_res.o qplib_rcfw.o \
qplib_sp.o qplib_fp.o
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Slow Path Operators (header)
*
*/
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
#define BNXT_RE_PAGE_SIZE_4K BIT(12)
#define BNXT_RE_PAGE_SIZE_8K BIT(13)
#define BNXT_RE_PAGE_SIZE_64K BIT(16)
#define BNXT_RE_PAGE_SIZE_2M BIT(21)
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
struct bnxt_re_dev *rdev;
struct net_device *vlan_dev;
};
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
/* For storing the actual qp1 cqe */
struct bnxt_qplib_cqe cqe;
struct bnxt_re_qp *qp1_qp;
};
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 16
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8
#define BNXT_RE_FLAG_QOS_WORK_REG 16
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
int id;
struct delayed_work worker;
u8 cur_prio_map;
/* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task;
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
/* NQ */
struct bnxt_qplib_nq nq;
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
atomic_t qp_count;
struct mutex qp_lock; /* protect qp list */
struct list_head qp_list;
atomic_t cq_count;
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
/* QP for handling QP1 packets */
u32 sqp_id;
struct bnxt_re_qp *qp1_sqp;
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
};
#define to_bnxt_re_dev(ptr, member) \
container_of((ptr), struct bnxt_re_dev, member)
#define BNXT_RE_ROCE_V1_PACKET 0
#define BNXT_RE_ROCEV2_IPV4_PACKET 2
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
return &rdev->ibdev.dev;
return NULL;
}
#endif
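to_bnxt_re_dev() is a plain container_of() wrapper around the ib_device embedded in struct bnxt_re_dev. A hedged illustration of how a verbs entry point that only receives the ib_device pointer recovers the driver-private structure (the helper name is illustrative, not part of the driver):

static inline struct bnxt_re_dev *ibdev_to_rdev(struct ib_device *ibdev)
{
	/* ibdev is the ib_device member embedded at the start of
	 * struct bnxt_re_dev above, so container_of() walks back to
	 * the enclosing bnxt_re_dev. */
	return to_bnxt_re_dev(ibdev, ibdev);
}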
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: IB Verbs interpreter (header)
*/
#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__
struct bnxt_re_gid_ctx {
u32 idx;
u32 refcnt;
};
struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi;
};
struct bnxt_re_ah {
struct bnxt_re_dev *rdev;
struct ib_ah ib_ah;
struct bnxt_qplib_ah qplib_ah;
};
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;
/* QP1 */
u32 send_psn;
struct ib_ud_header qp1_hdr;
};
struct bnxt_re_cq {
struct bnxt_re_dev *rdev;
spinlock_t cq_lock; /* protect cq */
u16 cq_count;
u16 cq_period;
struct ib_cq ib_cq;
struct bnxt_qplib_cq qplib_cq;
struct bnxt_qplib_cqe *cql;
#define MAX_CQL_PER_POLL 1024
u32 max_cql;
struct ib_umem *umem;
};
struct bnxt_re_mr {
struct bnxt_re_dev *rdev;
struct ib_mr ib_mr;
struct ib_umem *ib_umem;
struct bnxt_qplib_mrw qplib_mr;
u32 npages;
u64 *pages;
struct bnxt_qplib_frpl qplib_frpl;
};
struct bnxt_re_frpl {
struct bnxt_re_dev *rdev;
struct bnxt_qplib_frpl qplib_frpl;
u64 *page_list;
};
struct bnxt_re_fmr {
struct bnxt_re_dev *rdev;
struct ib_fmr ib_fmr;
struct bnxt_qplib_mrw qplib_fmr;
};
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
struct bnxt_qplib_mrw qplib_mw;
};
struct bnxt_re_ucontext {
struct bnxt_re_dev *rdev;
struct ib_ucontext ib_uctx;
struct bnxt_qplib_dpi *dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
};
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
int device_modify_mask,
struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
struct ib_ah_attr *ah_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
u64 iova);
int bnxt_re_unmap_fmr(struct list_head *fmr_list);
int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
#endif /* __BNXT_RE_IB_VERBS_H__ */
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: RDMA Controller HW interface (header)
*/
#ifndef __BNXT_QPLIB_RCFW_H__
#define __BNXT_QPLIB_RCFW_H__
#define RCFW_CMDQ_TRIG_VAL 1
#define RCFW_COMM_PCI_BAR_REGION 0
#define RCFW_COMM_CONS_PCI_BAR_REGION 2
#define RCFW_COMM_BASE_OFFSET 0x600
#define RCFW_PF_COMM_PROD_OFFSET 0xc
#define RCFW_VF_COMM_PROD_OFFSET 0xc
#define RCFW_COMM_TRIG_OFFSET 0x100
#define RCFW_COMM_SIZE 0x104
#define RCFW_DBR_PCI_BAR_REGION 2
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
do { \
memset(&(req), 0, sizeof((req))); \
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
(req).cmd_size = (sizeof((req)) + \
BNXT_QPLIB_CMDQE_UNITS - 1) / \
BNXT_QPLIB_CMDQE_UNITS; \
(req).flags = cpu_to_le16(cmd_flags); \
} while (0)
#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
/* CMDQ elements */
#define BNXT_QPLIB_CMDQE_MAX_CNT 256
#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
#define BNXT_QPLIB_CMDQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CMDQE_UNITS)
#define MAX_CMDQ_IDX (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
#define MAX_CMDQ_IDX_PER_PG (BNXT_QPLIB_CMDQE_CNT_PER_PG - 1)
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
/* Cmdq contains a fixed number of 16-byte slots */
struct bnxt_qplib_cmdqe {
u8 data[16];
};
static inline u32 get_cmdq_pg(u32 val)
{
return (val & ~MAX_CMDQ_IDX_PER_PG) / BNXT_QPLIB_CMDQE_CNT_PER_PG;
}
static inline u32 get_cmdq_idx(u32 val)
{
return val & MAX_CMDQ_IDX_PER_PG;
}
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
u8 data[1024];
};
/* CRSQ SB */
#define BNXT_QPLIB_CRSBE_MAX_CNT 4
#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
static inline u32 get_crsb_pg(u32 val)
{
return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
}
static inline u32 get_crsb_idx(u32 val)
{
return val & MAX_CRSB_IDX_PER_PG;
}
static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
u32 prod, dma_addr_t *dma_addr)
{
*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
BNXT_QPLIB_CRSBE_UNITS;
}
/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
#define BNXT_QPLIB_CREQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
#define MAX_CREQ_IDX (BNXT_QPLIB_CREQE_MAX_CNT - 1)
#define MAX_CREQ_IDX_PER_PG (BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
static inline u32 get_creq_pg(u32 val)
{
return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
}
static inline u32 get_creq_idx(u32 val)
{
return val & MAX_CREQ_IDX_PER_PG;
}
#define BNXT_QPLIB_CREQE_PER_PG (PAGE_SIZE / sizeof(struct creq_base))
#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->v & CREQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
#define CREQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
#define CREQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
#define CREQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
#define CREQ_DB_CP_FLAGS_REARM (CREQ_DB_KEY_CP | \
CREQ_DB_IDX_VALID)
#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
CREQ_DB_IDX_VALID | \
CREQ_DB_IRQ_DIS)
#define CREQ_DB_REARM(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define CREQ_DB(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
/* HWQ */
struct bnxt_qplib_crsqe {
struct creq_qp_event qp_event;
u32 req_size;
};
struct bnxt_qplib_crsq {
struct bnxt_qplib_crsqe *crsq;
u32 prod;
u32 cons;
u32 max_elements;
};
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
int vector;
struct tasklet_struct worker;
bool requested;
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
#define FIRMWARE_INITIALIZED_FLAG 1
#define FIRMWARE_FIRST_FLAG BIT(31)
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
atomic_t seq_num;
/* Bar region info */
void __iomem *cmdq_bar_reg_iomem;
u16 cmdq_bar_reg;
u16 cmdq_bar_reg_prod_off;
u16 cmdq_bar_reg_trig_off;
u16 creq_ring_id;
u16 creq_bar_reg;
void __iomem *creq_bar_reg_iomem;
/* Cmd-Resp and Async Event notification queue */
struct bnxt_qplib_hwq creq;
u64 creq_qp_event_processed;
u64 creq_func_event_processed;
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq crsq;
struct bnxt_qplib_hwq crsb;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)
(struct bnxt_qplib_rcfw *,
struct creq_func_event *));
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct cmdq_base *req, void **crsbe,
u8 is_block);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
#endif /* __BNXT_QPLIB_RCFW_H__ */
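RCFW_CMD_PREP() zeroes the request, fills in the opcode, converts the request size into 16-byte CMDQE units and stores the little-endian flags; the prepared request is then handed to bnxt_qplib_rcfw_send_message(). A hedged sketch of that flow (struct cmdq_query_func, struct creq_query_func_resp and the QUERY_FUNC opcode suffix are assumed to come from the firmware interface header; the NULL side buffer and non-blocking submit are simplifications, not verbatim driver code):

static int example_query_func(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_query_func req;
	struct creq_query_func_resp *resp;
	u16 cmd_flags = 0;

	/* Zero req, set opcode and cmd_size, store the flags. */
	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

	resp = (struct creq_query_func_resp *)
		bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					     NULL, 0);
	return resp ? 0 : -EINVAL;
}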
@@ -692,6 +692,10 @@ static int send_connect(struct c4iw_ep *ep)
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
+	struct net_device *netdev;
+	u64 params;
+
+	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
@@ -768,6 +772,8 @@ static int send_connect(struct c4iw_ep *ep)
		opt2 |= T5_ISS_F;
	}

+	params = cxgb4_select_ntuple(netdev, ep->l2t);
+
	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);
@@ -809,18 +815,22 @@ static int send_connect(struct c4iw_ep *ep)
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
-			req->params = cpu_to_be32(cxgb4_select_ntuple(
-						ep->com.dev->rdev.lldi.ports[0],
-						ep->l2t));
+			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
-			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
-						cxgb4_select_ntuple(
-						ep->com.dev->rdev.lldi.ports[0],
-						ep->l2t)));
-			t5req->rsvd = cpu_to_be32(isn);
-			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
-			t5req->opt2 = cpu_to_be32(opt2);
+			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+				t5req->params =
+					  cpu_to_be64(FILTER_TUPLE_V(params));
+				t5req->rsvd = cpu_to_be32(isn);
+				PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
+				t5req->opt2 = cpu_to_be32(opt2);
+			} else {
+				t6req->params =
+					  cpu_to_be64(FILTER_TUPLE_V(params));
+				t6req->rsvd = cpu_to_be32(isn);
+				PDBG("%s snd_isn %u\n", __func__, t6req->rsvd);
+				t6req->opt2 = cpu_to_be32(opt2);
+			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
@@ -859,18 +869,24 @@ static int send_connect(struct c4iw_ep *ep)
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
-			req6->params = cpu_to_be32(cxgb4_select_ntuple(
-						ep->com.dev->rdev.lldi.ports[0],
-						ep->l2t));
+			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								       ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
-			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
-						cxgb4_select_ntuple(
-						ep->com.dev->rdev.lldi.ports[0],
-						ep->l2t)));
-			t5req6->rsvd = cpu_to_be32(isn);
-			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
-			t5req6->opt2 = cpu_to_be32(opt2);
+			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+				t5req6->params =
+					  cpu_to_be64(FILTER_TUPLE_V(params));
+				t5req6->rsvd = cpu_to_be32(isn);
+				PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
+				t5req6->opt2 = cpu_to_be32(opt2);
+			} else {
+				t6req6->params =
+					  cpu_to_be64(FILTER_TUPLE_V(params));
+				t6req6->rsvd = cpu_to_be32(isn);
+				PDBG("%s snd_isn %u\n", __func__, t6req6->rsvd);
+				t6req6->opt2 = cpu_to_be32(opt2);
+			}
		}
	}
@@ -2517,18 +2533,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

-		sin->sin_family = PF_INET;
+		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
-		sin->sin_family = PF_INET;
+		sin->sin_family = AF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
-		sin->sin_family = PF_INET;
+		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
...
@@ -331,10 +331,6 @@ struct diag_pkt {
 #define FULL_MGMT_P_KEY      0xFFFF

 #define DEFAULT_P_KEY LIM_MGMT_P_KEY
-#define HFI1_AETH_CREDIT_SHIFT	24
-#define HFI1_AETH_CREDIT_MASK	0x1F
-#define HFI1_AETH_CREDIT_INVAL	0x1F
-#define HFI1_MSN_MASK	0xFFFFFF
 #define HFI1_FECN_SHIFT 31
 #define HFI1_FECN_MASK 1
 #define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT)
...
@@ -32,6 +32,7 @@
 */
 #include <linux/acpi.h>
 #include <linux/of_platform.h>
+#include <linux/module.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
...