Commit 724bdd09 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Reject dynamic memory add/remove when ehca adapter is present
  IB/ehca: Fix reported max number of QPs and CQs in systems with >1 adapter
  IPoIB: Set netdev offload features properly for child (VLAN) interfaces
  IPoIB: Clean up ethtool support
  mlx4_core: Add Ethernet PCI device IDs
  mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC
  mlx4_core: Multiple port type support
  mlx4_core: Ethernet MAC/VLAN management
  mlx4_core: Get ethernet MTU and default address from firmware
  mlx4_core: Support multiple pre-reserved QP regions
  Update NetEffect maintainer emails to Intel emails
  RDMA/cxgb3: Remove cmid reference on tid allocation failures
  IB/mad: Use krealloc() to resize snoop table
  IPoIB: Always initialize poll_timer to avoid crash on unload
  IB/ehca: Don't allow creating UC QP with SRQ
  mlx4_core: Add QP range reservation support
  RDMA/ucma: Test ucma_alloc_multicast() return against NULL, not with IS_ERR()
parents dc8dcad8 56f2fdaa
@@ -2928,9 +2928,9 @@ S:	Maintained
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 P:	Faisal Latif
-M:	flatif@neteffect.com
+M:	faisal.latif@intel.com
 P:	Chien Tung
-M:	ctung@neteffect.com
+M:	chien.tin.tung@intel.com
 L:	general@lists.openfabrics.org
 W:	http://www.neteffect.com
 S:	Supported
...
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 	if (i == qp_info->snoop_table_size) {
 		/* Grow table. */
-		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
-					  qp_info->snoop_table_size + 1,
-					  GFP_ATOMIC);
+		new_snoop_table = krealloc(qp_info->snoop_table,
+					   sizeof mad_snoop_priv *
+					   (qp_info->snoop_table_size + 1),
+					   GFP_ATOMIC);
 		if (!new_snoop_table) {
 			i = -ENOMEM;
 			goto out;
 		}
-		if (qp_info->snoop_table) {
-			memcpy(new_snoop_table, qp_info->snoop_table,
-			       sizeof mad_snoop_priv *
-				      qp_info->snoop_table_size);
-			kfree(qp_info->snoop_table);
-		}
 		qp_info->snoop_table = new_snoop_table;
 		qp_info->snoop_table_size++;
 	}
...
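The krealloc() conversion above is safe because krealloc(), like userspace realloc(), preserves the existing contents and leaves the old buffer intact on failure, which is why the hand-rolled memcpy()/kfree() block can be dropped. A minimal userspace sketch of the same grow-in-place idiom (names are illustrative, not kernel API):

#include <stdlib.h>

/* Grow a pointer table by one slot, realloc-style. On failure the old
 * table is still valid, mirroring krealloc() semantics. */
struct snoop_table {
	void **entries;
	int size;
};

static int snoop_table_grow(struct snoop_table *t)
{
	void **bigger = realloc(t->entries, sizeof(*bigger) * (t->size + 1));

	if (!bigger)
		return -1;		/* the kernel version returns -ENOMEM */
	t->entries = bigger;
	t->entries[t->size++] = NULL;
	return 0;
}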
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	mutex_lock(&file->mut);
 	mc = ucma_alloc_multicast(ctx);
-	if (IS_ERR(mc)) {
-		ret = PTR_ERR(mc);
+	if (!mc) {
+		ret = -ENOMEM;
 		goto err1;
 	}
...
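This fix resolves a clash between two kernel error-return conventions: ucma_alloc_multicast() signals failure with a plain NULL, while IS_ERR()/PTR_ERR() only recognize pointers encoding the topmost 4095 error values. A self-contained userspace rendering of the convention, showing why the old check could never fire:

#include <errno.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *) err; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long) p >= (unsigned long) -MAX_ERRNO;
}

int main(void)
{
	printf("IS_ERR(NULL) = %d\n", IS_ERR(NULL));	/* 0: NULL is not an error pointer */
	printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d\n",
	       IS_ERR(ERR_PTR(-ENOMEM)));		/* 1 */
	return 0;
}

With the old code, an allocation failure sailed past the IS_ERR() test and the NULL mc was dereferenced later; testing against NULL and returning -ENOMEM matches what the allocator actually reports.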
@@ -1942,6 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 fail3:
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 out:
 	return err;
...
@@ -128,6 +128,8 @@ struct ehca_shca {
 	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
 	u32 hca_cap_mr_pgsize;
 	int max_mtu;
+	int max_num_qps;
+	int max_num_cqs;
 	atomic_t num_cqs;
 	atomic_t num_qps;
 };
...
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
 		ehca_err(device, "Unable to create CQ, max number of %i "
-			 "CQs reached.", ehca_max_cq);
+			 "CQs reached.", shca->max_num_cqs);
 		ehca_err(device, "To increase the maximum number of CQs "
 			 "use the number_of_cqs module parameter.\n");
 		return ERR_PTR(-ENOSPC);
...
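atomic_add_unless() is what turns shca->num_cqs into a lock-free capped counter: the increment only succeeds while the count is below the (now per-adapter) limit. A sketch of the underlying compare-and-swap loop in plain C11 atomics, not the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns false once the limit is hit, which is exactly when
 * ehca_create_cq() bails out with -ENOSPC. */
static bool add_unless(atomic_int *v, int a, int limit)
{
	int c = atomic_load(v);

	while (c != limit) {
		/* on failure, c is reloaded with the current value; retry */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}

The matching atomic_dec() on the destroy path (and on early-error paths such as the UC-with-SRQ rejection further down) keeps the counter balanced.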
@@ -44,6 +44,8 @@
 #include <linux/slab.h>
 #endif
 
+#include <linux/notifier.h>
+#include <linux/memory.h>
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 			shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
 
 	/* Set maximum number of CQs and QPs to calculate EQ size */
-	if (ehca_max_qp == -1)
-		ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
-		ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_qp);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_qps == -1)
+		shca->max_num_qps = min_t(int, rblock->max_qp,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
+		ehca_gen_warn("The requested number of QPs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_qp, rblock->max_qp);
+		shca->max_num_qps = rblock->max_qp;
 	}
 
-	if (ehca_max_cq == -1)
-		ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
-	else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
-		ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
-			     "specified by HW", rblock->max_cq);
-		ret = -EINVAL;
-		goto sense_attributes1;
+	if (shca->max_num_cqs == -1)
+		shca->max_num_cqs = min_t(int, rblock->max_cq,
+					  EHCA_MAX_NUM_QUEUES);
+	else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
+		ehca_gen_warn("The requested number of CQs is out of range "
+			      "(1 - %i) specified by HW. Value is set to %i",
+			      rblock->max_cq, rblock->max_cq);
 	}
 
 	/* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
 		ehca_gen_err("Cannot allocate shca memory.");
 		return -ENOMEM;
 	}
+
 	mutex_init(&shca->modify_mutex);
 	atomic_set(&shca->num_cqs, 0);
 	atomic_set(&shca->num_qps, 0);
+	shca->max_num_qps = ehca_max_qp;
+	shca->max_num_cqs = ehca_max_cq;
+
 	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
 		spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 		goto probe1;
 	}
 
-	eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
+	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
 	/* create event queues */
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
 	if (ret) {
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data)
 	spin_unlock(&shca_list_lock);
 }
 
+static int ehca_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	static unsigned long ehca_dmem_warn_time;
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		return NOTIFY_OK;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+		/* only ok if no hca is attached to the lpar */
+		spin_lock(&shca_list_lock);
+		if (list_empty(&shca_list)) {
+			spin_unlock(&shca_list_lock);
+			return NOTIFY_OK;
+		} else {
+			spin_unlock(&shca_list_lock);
+			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+						   30 * 1000))
+				ehca_gen_err("DMEM operations are not allowed "
+					     "as long as an ehca adapter is "
+					     "attached to the LPAR");
+			return NOTIFY_BAD;
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehca_mem_nb = {
+	.notifier_call = ehca_mem_notifier,
+};
+
 static int __init ehca_module_init(void)
 {
 	int ret;
 
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
+	ret = register_memory_notifier(&ehca_mem_nb);
+	if (ret) {
+		ehca_gen_err("Failed registering memory add/remove notifier");
+		goto module_init3;
+	}
+
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
 		ehca_gen_err("It is possible to lose interrupts.");
 
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void)
 
 	return 0;
 
+module_init3:
+	ibmebus_unregister_driver(&ehca_driver);
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void)
 
 	ibmebus_unregister_driver(&ehca_driver);
 
+	unregister_memory_notifier(&ehca_mem_nb);
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
...
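The new module_init3 label follows the kernel's goto-unwind convention: every acquisition gets a release label, and a failure jumps to the label that releases everything acquired so far, in reverse order; ehca_module_exit() runs the same releases unconditionally. A self-contained sketch of the pattern (the acquire/release helpers are invented for illustration):

#include <stdio.h>

static int  acquire(const char *what, int fail) { printf("acquire %s\n", what); return fail; }
static void release(const char *what)           { printf("release %s\n", what); }

static int example_init(int notifier_fails)
{
	int ret;

	ret = acquire("slab caches", 0);
	if (ret)
		return ret;

	ret = acquire("bus driver", 0);		/* ibmebus_register_driver() */
	if (ret)
		goto release_caches;		/* module_init2 */

	ret = acquire("memory notifier", notifier_fails); /* register_memory_notifier() */
	if (ret)
		goto release_driver;		/* the new module_init3 */

	return 0;

release_driver:
	release("bus driver");
release_caches:
	release("slab caches");
	return ret;
}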
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp(
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
-	if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
 		ehca_err(pd->device, "Unable to create QP, max number of %i "
-			 "QPs reached.", ehca_max_qp);
+			 "QPs reached.", shca->max_num_qps);
 		ehca_err(pd->device, "To increase the maximum number of QPs "
 			 "use the number_of_qps module parameter.\n");
 		return ERR_PTR(-ENOSPC);
 
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp(
 	if (init_attr->srq) {
 		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
 
+		if (qp_type == IB_QPT_UC) {
+			ehca_err(pd->device, "UC with SRQ not supported");
+			atomic_dec(&shca->num_qps);
+			return ERR_PTR(-EINVAL);
+		}
+
 		has_srq = 1;
 		parms.ext_type = EQPT_SRQBASE;
 		parms.srq_qpn = my_srq->real_qp_num;
...
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	int p, q;
 	int ret;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q) {
 			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
 						      q ? IB_QPT_GSI : IB_QPT_SMI,
 
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 	return 0;
 
 err:
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
 		for (q = 0; q <= 1; ++q)
 			if (dev->send_agent[p][q])
 				ib_unregister_mad_agent(dev->send_agent[p][q]);
 
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 	struct ib_mad_agent *agent;
 	int p, q;
 
-	for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+	for (p = 0; p < dev->num_ports; ++p) {
 		for (q = 0; q <= 1; ++q) {
 			agent = dev->send_agent[p][q];
 			dev->send_agent[p][q] = NULL;
...
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
-	ibdev->ib_dev.phys_port_cnt	= dev->caps.num_ports;
+	ibdev->num_ports		= 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ibdev->num_ports++;
+	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors	= 1;
 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
 
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
-	for (p = 1; p <= dev->caps.num_ports; ++p)
+	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
 	mlx4_ib_mad_cleanup(ibdev);
 
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
+	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+	if (port > ibdev->num_ports)
+		return;
 
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
...
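These loops change because, with DPDP (dual-protocol dual-port) support, only a subset of a device's ports may run IB; the IB driver must size and iterate by its own port count rather than dev->caps.num_ports. mlx4_foreach_port() visits just the ports of one type; its shape is roughly the following (illustrative expansion only, not the exact macro from <linux/mlx4/driver.h>):

/* Illustrative only: the real macro filters on a port-type mask
 * maintained by mlx4_core. */
#define mlx4_foreach_port(p, dev, type)				\
	for ((p) = 1; (p) <= (dev)->caps.num_ports; (p)++)	\
		if ((dev)->caps.port_type[(p)] == (type))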
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
 struct mlx4_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx4_dev	       *dev;
+	int			num_ports;
 	void __iomem	       *uar_map;
 	struct mlx4_uar		priv_uar;
...
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
+	int qpn;
 	int err;
 
 	mutex_init(&qp->mutex);
 
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
-	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+	if (sqpn) {
+		qpn = sqpn;
+	} else {
+		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		if (err)
+			goto err_wrid;
+	}
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
-		goto err_wrid;
+		goto err_qpn;
 
 	/*
 	 * Hardware wants QPN written in big-endian order (after
 
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 
 	return 0;
 
+err_qpn:
+	if (!sqpn)
+		mlx4_qp_release_range(dev->dev, qpn, 1);
+
 err_wrid:
 	if (pd->uobject) {
 		if (!init_attr->srq)
 
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
+
+	if (!is_sqp(dev, qp))
+		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
...
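mlx4_qp_reserve_range(), added by this series, hands out a contiguous, aligned block of QP numbers; create_qp_common() uses the degenerate single-QPN case, while special QPs keep their fixed sqpn and are never released back. A hedged sketch of the call pairing for a regular QP, with signatures taken from the calls visible in this diff:

/* Sketch only; error handling trimmed to the essentials. */
int alloc_one_qp(struct mlx4_dev *mdev, struct mlx4_qp *mqp)
{
	int qpn, err;

	err = mlx4_qp_reserve_range(mdev, 1, 1, &qpn);	/* cnt = 1, align = 1 */
	if (err)
		return err;

	err = mlx4_qp_alloc(mdev, qpn, mqp);
	if (err)
		mlx4_qp_release_range(mdev, qpn, 1);	/* undo the reservation */
	return err;
}

The destroy path mirrors this: mlx4_qp_free() first, then mlx4_qp_release_range() unless the QP is a special QP.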
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
...
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
 }
 
+static u32 ipoib_get_rx_csum(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
+		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
 
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
 
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
-	.get_tso		= ethtool_op_get_tso,
+	.get_rx_csum		= ipoib_get_rx_csum,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
 	.get_flags		= ethtool_op_get_flags,
...
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
-	init_timer(&priv->poll_timer);
-	priv->poll_timer.function = ipoib_ib_tx_timer_func;
-	priv->poll_timer.data = (unsigned long)dev;
-
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
 	return 0;
 
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		return -ENODEV;
 	}
 
+	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
+		    (unsigned long) dev);
+
 	if (dev->flags & IFF_UP) {
 		if (ipoib_ib_dev_open(dev)) {
 			ipoib_transport_dev_cleanup(dev);
...
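Moving the timer setup from ipoib_ib_dev_open() to ipoib_ib_dev_init() guarantees poll_timer is initialized even when the interface is never brought up, so the timer teardown on module unload cannot operate on uninitialized memory. setup_timer() is just shorthand for the three-line sequence this patch removes:

/* the removed open-coded form ... */
init_timer(&priv->poll_timer);
priv->poll_timer.function = ipoib_ib_tx_timer_func;
priv->poll_timer.data     = (unsigned long) dev;

/* ... and its one-line equivalent used by the fix */
setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, (unsigned long) dev);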
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
 	return device_create_file(&dev->dev, &dev_attr_pkey);
 }
 
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+{
+	struct ib_device_attr *device_attr;
+	int result = -ENOMEM;
+
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		return result;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		return result;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
+	if (lro)
+		priv->dev->features |= NETIF_F_LRO;
+
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
+	return 0;
+}
+
 static struct net_device *ipoib_add_port(const char *format,
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
-	struct ib_device_attr *device_attr;
 	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto device_init_failed;
 	}
 
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr) {
-		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
-		       hca->name, sizeof *device_attr);
+	if (ipoib_set_dev_features(priv, hca))
 		goto device_init_failed;
-	}
-
-	result = ib_query_device(hca, device_attr);
-	if (result) {
-		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
-		       hca->name, result);
-		kfree(device_attr);
-		goto device_init_failed;
-	}
-	priv->hca_caps = device_attr->device_cap_flags;
-
-	kfree(device_attr);
-
-	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
-		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-	}
-
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
 
 	/*
 	 * Set the full membership bit, so that we join the right
 
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto event_failed;
 	}
 
-	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
-		priv->dev->features |= NETIF_F_TSO;
-
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
...
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
+	result = ipoib_set_dev_features(priv, ppriv->ca);
+	if (result)
+		goto device_init_failed;
+
 	priv->pkey = pkey;
 
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
...
@@ -2504,6 +2504,15 @@ config PASEMI_MAC
 	  This driver supports the on-chip 1/10Gbit Ethernet controller on
 	  PA Semi's PWRficient line of chips.
 
+config MLX4_EN
+	tristate "Mellanox Technologies 10Gbit Ethernet support"
+	depends on PCI && INET
+	select MLX4_CORE
+	select INET_LRO
+	help
+	  This driver supports Mellanox Technologies ConnectX Ethernet
+	  devices.
+
 config MLX4_CORE
 	tristate
 	depends on PCI
...
 obj-$(CONFIG_MLX4_CORE)		+= mlx4_core.o
 
 mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o profile.o qp.o reset.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o srq.o
+
+obj-$(CONFIG_MLX4_EN)		+= mlx4_en.o
+
+mlx4_en-y :=	en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+		en_resources.o en_netdev.o
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 
 	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
 	if (obj >= bitmap->max) {
-		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
 		obj = find_first_zero_bit(bitmap->table, bitmap->max);
 	}
 
 	if (obj < bitmap->max) {
 		set_bit(obj, bitmap->table);
-		bitmap->last = (obj + 1) & (bitmap->max - 1);
+		bitmap->last = (obj + 1);
+		if (bitmap->last == bitmap->max)
+			bitmap->last = 0;
 		obj |= bitmap->top;
 	} else
 		obj = -1;
 
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
 {
-	obj &= bitmap->max - 1;
+	mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+static unsigned long find_aligned_range(unsigned long *bitmap,
+					u32 start, u32 nbits,
+					int len, int align)
+{
+	unsigned long end, i;
+
+again:
+	start = ALIGN(start, align);
+
+	while ((start < nbits) && test_bit(start, bitmap))
+		start += align;
+
+	if (start >= nbits)
+		return -1;
+
+	end = start+len;
+	if (end > nbits)
+		return -1;
+
+	for (i = start + 1; i < end; i++) {
+		if (test_bit(i, bitmap)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+	u32 obj, i;
+
+	if (likely(cnt == 1 && align == 1))
+		return mlx4_bitmap_alloc(bitmap);
+
+	spin_lock(&bitmap->lock);
+
+	obj = find_aligned_range(bitmap->table, bitmap->last,
+				 bitmap->max, cnt, align);
+	if (obj >= bitmap->max) {
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
+		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+					 cnt, align);
+	}
+
+	if (obj < bitmap->max) {
+		for (i = 0; i < cnt; i++)
+			set_bit(obj + i, bitmap->table);
+		if (obj == bitmap->last) {
+			bitmap->last = (obj + cnt);
+			if (bitmap->last >= bitmap->max)
+				bitmap->last = 0;
+		}
+		obj |= bitmap->top;
+	} else
+		obj = -1;
+
+	spin_unlock(&bitmap->lock);
+
+	return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+	u32 i;
+
+	obj &= bitmap->max + bitmap->reserved_top - 1;
 
 	spin_lock(&bitmap->lock);
-	clear_bit(obj, bitmap->table);
+	for (i = 0; i < cnt; i++)
+		clear_bit(obj + i, bitmap->table);
 	bitmap->last = min(bitmap->last, obj);
-	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+			& bitmap->mask;
 	spin_unlock(&bitmap->lock);
 }
 
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top)
 {
 	int i;
 
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
 	bitmap->last = 0;
 	bitmap->top  = 0;
-	bitmap->max  = num;
+	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
+	bitmap->reserved_top = reserved_top;
 	spin_lock_init(&bitmap->lock);
-	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+				sizeof (long), GFP_KERNEL);
 	if (!bitmap->table)
 		return -ENOMEM;
 
-	for (i = 0; i < reserved; ++i)
+	for (i = 0; i < reserved_bot; ++i)
 		set_bit(i, bitmap->table);
 
 	return 0;
...
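With reserved_top in place, a bitmap can fence off entries at both ends: reserved_bot entries are pre-set at the bottom, and reserved_top entries are subtracted from max at the top; this is what lets mlx4_core pre-reserve QP regions for Ethernet. A hedged usage sketch with made-up sizes:

/* Illustration only; the sizes are invented. num must be a power of two
 * and mask = num - 1, as in the callers above. */
struct mlx4_bitmap bm;
u32 base;
int err;

err = mlx4_bitmap_init(&bm, 256, 255, 8 /* reserved_bot */, 16 /* reserved_top */);
if (err)
	return err;	/* 256 - 16 = 240 entries allocatable, first 8 pre-set */

/* carve out 4 consecutive objects aligned to a 4-object boundary */
base = mlx4_bitmap_alloc_range(&bm, 4, 4);
if (base == (u32) -1)
	return -ENOMEM;

/* ... use base .. base + 3 ... */

mlx4_bitmap_free_range(&bm, base, 4);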
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
 	if (err)
 		return err;
...
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
return;
}
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
cq->size = entries;
if (mode == RX)
cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
else
cq->buf_size = sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
spin_lock_init(&cq->lock);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
if (err)
return err;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
return err;
}
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
cq->mcq.arm_db = cq->wqres.db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
memset(cq->buf, 0, cq->buf_size);
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
cq->wqres.db.dma, &cq->mcq, cq->is_tx);
if (err)
return err;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
if (cq->is_tx) {
init_timer(&cq->timer);
cq->timer.function = mlx4_en_poll_tx_cq;
cq->timer.data = (unsigned long) cq;
} else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_enable(&cq->napi);
}
return 0;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
cq->buf_size = 0;
cq->buf = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
if (cq->is_tx)
del_timer(&cq->timer);
else
napi_disable(&cq->napi);
mlx4_cq_free(mdev->dev, &cq->mcq);
}
/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
cq->moder_cnt, cq->moder_time);
}
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
cq->armed = 1;
mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
&priv->mdev->uar_lock);
return 0;
}
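Note the TX/RX split set up above: TX completions are reaped from a timer (cq->timer, mlx4_en_poll_tx_cq), while RX completions go through NAPI. A hedged sketch of what a poll callback registered via netif_napi_add() typically looks like in this era of the stack; process_rx_ring() is a hypothetical stand-in for the driver's real CQE-processing loop:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	int done = process_rx_ring(cq, budget);	/* hypothetical helper */

	if (done < budget) {
		/* ring drained: leave polling mode and re-arm the CQ */
		netif_rx_complete(cq->dev, napi);
		mlx4_en_arm_cq(netdev_priv(cq->dev), cq);
	}
	return done;
}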
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
if (!mdev->pndev[port])
return;
priv = netdev_priv(mdev->pndev[port]);
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
priv->link_state = event;
queue_work(mdev->workqueue, &priv->linkstate_task);
break;
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
default:
mlx4_warn(mdev, "Unhandled event: %d\n", event);
}
}
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
struct mlx4_en_dev *mdev = endev_ptr;
int i;
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
mlx4_mr_free(dev, &mdev->mr);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
kfree(mdev);
}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
static int mlx4_en_version_printed;
struct mlx4_en_dev *mdev;
int i;
int err;
if (!mlx4_en_version_printed) {
printk(KERN_INFO "%s", mlx4_en_version);
mlx4_en_version_printed++;
}
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed, "
"aborting.\n");
err = -ENOMEM;
goto err_free_res;
}
if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
goto err_free_dev;
if (mlx4_uar_alloc(dev, &mdev->priv_uar))
goto err_pd;
mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!mdev->uar_map)
goto err_uar;
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
mdev->dma_device = &(dev->pdev->dev);
mdev->pdev = dev->pdev;
mdev->device_up = false;
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
mlx4_warn(mdev, "LSO not supported, please upgrade to later "
"FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
0, 0, &mdev->mr)) {
mlx4_err(mdev, "Failed allocating memory region\n");
goto err_uar;
}
if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
mlx4_err(mdev, "Failed enabling memory region\n");
goto err_mr;
}
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
mlx4_err(mdev, "Bad module parameters, aborting.\n");
goto err_mr;
}
/* Configure which ports to start according to module parameters */
mdev->port_cnt = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
/* If we did not receive an explicit number of Rx rings, default to
* the number of completion vectors populated by the mlx4_core */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Using %d tx rings for port:%d\n",
mdev->profile.prof[i].tx_ring_num, i);
if (!mdev->profile.prof[i].rx_ring_num) {
mdev->profile.prof[i].rx_ring_num = 1;
mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
1, i);
} else
mlx4_info(mdev, "Using %d rx rings for port:%d\n",
mdev->profile.prof[i].rx_ring_num, i);
}
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
goto err_close_nic;
}
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
mutex_init(&mdev->state_lock);
mdev->device_up = true;
/* Setup ports */
/* Create a netdev for each port */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Activating port:%d\n", i);
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
mdev->pndev[i] = NULL;
goto err_free_netdev;
}
}
return mdev;
err_free_netdev:
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
}
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
flush_workqueue(mdev->workqueue);
/* Stop event queue before we drop down to release shared SW state */
err_close_nic:
destroy_workqueue(mdev->workqueue);
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:
mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
kfree(mdev);
err_free_res:
return NULL;
}
static struct mlx4_interface mlx4_en_interface = {
.add = mlx4_en_add,
.remove = mlx4_en_remove,
.event = mlx4_en_event,
};
static int __init mlx4_en_init(void)
{
return mlx4_register_interface(&mlx4_en_interface);
}
static void __exit mlx4_en_cleanup(void)
{
mlx4_unregister_interface(&mlx4_en_interface);
}
module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);
...
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/if_vlan.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "en_port.h"
#include "mlx4_en.h"
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
u64 mac, u64 clear, u8 mode)
{
return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
}
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vlan_fltr_mbox *filter;
int i;
int j;
int index = 0;
u32 entry;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
filter = mailbox->buf;
if (grp) {
memset(filter, 0, sizeof *filter);
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
if (vlan_group_get_device(grp, index++))
entry |= 1 << j;
filter->entry[i] = cpu_to_be32(entry);
}
} else {
/* When no vlans are configured we block all vlans */
memset(filter, 0, sizeof(*filter));
}
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
context->pfctx = pfctx;
context->pprx = (pprx * (!pfcrx)) << 7;
context->pfcrx = pfcrx;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
context->vlan_miss = MLX4_VLAN_MISS_IDX;
in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct net_device_stats *stats = &priv->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
if (err)
goto out;
mlx4_en_stats = mailbox->buf;
spin_lock_bh(&priv->stats_lock);
stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
be32_to_cpu(mlx4_en_stats->RDROP);
stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
be64_to_cpu(mlx4_en_stats->ROCT_novlan);
stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RdropLength) +
be32_to_cpu(mlx4_en_stats->RJBBR) +
be32_to_cpu(mlx4_en_stats->RCRC) +
be32_to_cpu(mlx4_en_stats->RRUNT);
stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->MCAST_novlan);
stats->collisions = 0;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = 0;
stats->tx_fifo_errors = 0;
stats->tx_heartbeat_errors = 0;
stats->tx_window_errors = 0;
priv->pkstats.broadcast =
be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
spin_unlock_bh(&priv->stats_lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
}
...
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>
#include "mlx4_en.h"
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int srqn,
struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
context->flags = cpu_to_be32(7 << 16 | rss << 13);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
context->rq_size_stride = 0;
if (is_tx)
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
else
context->sq_size_stride = 1;
context->usr_page = cpu_to_be32(mdev->priv_uar.index);
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
context->pri_path.counter_index = 0xff;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!rss)
context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
}
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
struct page **pages;
int i;
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return 0;
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->page_list[i].buf);
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
return -ENOMEM;
return 0;
}
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return;
vunmap(buf->direct.buf);
}
...
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	int i;
 
 	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
 	if (err)
 		return err;
...
...@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) ...@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[ 8] = "P_Key violation counter", [ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter", [ 9] = "Q_Key violation counter",
[10] = "VMM", [10] = "VMM",
[12] = "DPDP",
[16] = "MW support", [16] = "MW support",
[17] = "APM support", [17] = "APM support",
[18] = "Atomic ops support", [18] = "Atomic ops support",
...@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->max_vl[i] = field >> 4; dev_cap->max_vl[i] = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
dev_cap->max_mtu[i] = field >> 4; dev_cap->ib_mtu[i] = field >> 4;
dev_cap->max_port_width[i] = field & 0xf; dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
 			dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,9 +355,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
 		}
 	} else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
 #define QUERY_PORT_MTU_OFFSET			0x01
+#define QUERY_PORT_ETH_MTU_OFFSET		0x02
 #define QUERY_PORT_WIDTH_OFFSET		0x06
 #define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
+#define QUERY_PORT_MAC_OFFSET			0x08
+#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
 #define QUERY_PORT_MAX_VL_OFFSET		0x0b
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 			if (err)
 				goto out;
+			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+			dev_cap->supported_port_types[i] = field & 3;
 			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
-			dev_cap->max_mtu[i] = field & 0xf;
+			dev_cap->ib_mtu[i] = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
 			dev_cap->max_port_width[i] = field & 0xf;
 			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
 			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
 			dev_cap->max_vl[i] = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+			dev_cap->log_max_macs[i] = field & 0xf;
+			dev_cap->log_max_vlans[i] = field >> 4;
+			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
 		}
 	}
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
 		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
 	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
-		 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
 		 dev_cap->max_port_width[1]);
 	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
 		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
-		field = 128 << dev->caps.mtu_cap[port];
+		field = 128 << dev->caps.ib_mtu_cap[port];
 		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
 		field = dev->caps.gid_table_len[port];
 		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
...
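The rename from max_mtu to ib_mtu in the hunks above is not cosmetic: the IB MTU capability is a log-scale firmware code (hence the 128 << expressions), while the new eth_mtu field is already a byte count read straight from QUERY_PORT. A standalone sketch of the conversion, with an illustrative helper name:

#include <stdio.h>

/* Sketch only: the IB MTU capability is a log-scale code, which is why
 * the driver prints "128 << dev_cap->ib_mtu[1]" above. Codes 1..5 map
 * to 256..4096 bytes. ib_mtu_code_to_bytes() is illustrative, not a
 * driver function. */
static int ib_mtu_code_to_bytes(int code)
{
	return 128 << code;
}

int main(void)
{
	int code;

	for (code = 1; code <= 5; ++code)
		printf("IB MTU code %d -> %d bytes\n",
		       code, ib_mtu_code_to_bytes(code));
	return 0;
}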
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
 	int local_ca_ack_delay;
 	int num_ports;
 	u32 max_msg_sz;
-	int max_mtu[MLX4_MAX_PORTS + 1];
+	int ib_mtu[MLX4_MAX_PORTS + 1];
 	int max_port_width[MLX4_MAX_PORTS + 1];
 	int max_vl[MLX4_MAX_PORTS + 1];
 	int max_gids[MLX4_MAX_PORTS + 1];
 	int max_pkeys[MLX4_MAX_PORTS + 1];
+	u64 def_mac[MLX4_MAX_PORTS + 1];
+	u16 eth_mtu[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
 	u32 flags;
 	int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
+	u8 supported_port_types[MLX4_MAX_PORTS + 1];
+	u8 log_max_macs[MLX4_MAX_PORTS + 1];
+	u8 log_max_vlans[MLX4_MAX_PORTS + 1];
 };

 struct mlx4_adapter {
...
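All of the per-port arrays in struct mlx4_dev_cap are sized MLX4_MAX_PORTS + 1 because mlx4 ports are numbered from 1; index 0 is never used. A hedged sketch of how a consumer might walk the new fields (report_ports() is a hypothetical helper, not part of the driver):

/* Sketch only: walking the 1-based per-port capability arrays added
 * above. mlx4_dbg() is the driver's own debug macro, used throughout
 * the surrounding hunks. */
static void report_ports(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int i;

	for (i = 1; i <= dev_cap->num_ports; ++i)	/* index 0 is unused */
		mlx4_dbg(dev, "port %d: eth MTU %d, %d MACs, %d VLANs\n",
			 i, dev_cap->eth_mtu[i],
			 1 << dev_cap->log_max_macs[i],
			 1 << dev_cap->log_max_vlans[i]);
}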
[diff collapsed]
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;

-	err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
-			       dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+			       dev->caps.num_amgms - 1, 0, 0);
 	if (err)
 		return err;
...
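Every existing mlx4_bitmap_init() caller in this series gains a trailing 0: the new fifth argument is reserved_top, a count of entries fenced off at the top of the range, alongside the old bottom reservation. A sketch of what the extended initializer plausibly does, assuming it mirrors the original implementation; the body below is a reconstruction for illustration, not the commit's code:

/* Reconstruction under stated assumptions: reserved_bot burns entries at
 * the bottom of the range (as the old "reserved" argument did), while
 * reserved_top shrinks the allocatable range from above. */
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	int i;

	/* num must be a power of 2, as before */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last	     = 0;
	bitmap->top	     = 0;
	bitmap->max	     = num - reserved_top;  /* top range never handed out */
	bitmap->mask	     = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof(long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < (int) reserved_bot; ++i)	/* burn the bottom range */
		set_bit(i, bitmap->table);

	return 0;
}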
@@ -111,6 +111,7 @@ struct mlx4_bitmap {
 	u32 last;
 	u32 top;
 	u32 max;
+	u32 reserved_top;
 	u32 mask;
 	spinlock_t lock;
 	unsigned long *table;
@@ -251,6 +252,38 @@ struct mlx4_catas_err {
 	struct list_head list;
 };

+#define MLX4_MAX_MAC_NUM	128
+#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+	__be64 entries[MLX4_MAX_MAC_NUM];
+	int refs[MLX4_MAX_MAC_NUM];
+	struct mutex mutex;
+	int total;
+	int max;
+};
+
+#define MLX4_MAX_VLAN_NUM	128
+#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+	__be32 entries[MLX4_MAX_VLAN_NUM];
+	int refs[MLX4_MAX_VLAN_NUM];
+	struct mutex mutex;
+	int total;
+	int max;
+};
+
+struct mlx4_port_info {
+	struct mlx4_dev *dev;
+	int port;
+	char dev_name[16];
+	struct device_attribute port_attr;
+	enum mlx4_port_type tmp_type;
+	struct mlx4_mac_table mac_table;
+	struct mlx4_vlan_table vlan_table;
+};
+
 struct mlx4_priv {
 	struct mlx4_dev dev;
@@ -279,6 +312,8 @@ struct mlx4_priv {
 	struct mlx4_uar driver_uar;
 	void __iomem *kar;
+	struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
+	struct mutex port_mutex;
 };

 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +323,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top);
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);

 int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +384,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);

 void mlx4_handle_catas_err(struct mlx4_dev *dev);

+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+
 #endif /* MLX4_H */
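The refs[] arrays make each MAC and VLAN table slot reference counted, so two consumers asking for the same address share a single hardware entry instead of burning two of the 128 slots. A hedged sketch of the lookup-or-allocate pattern these fields enable (example_register_mac() is illustrative, not the driver's registration function):

/* Sketch only: ref-counted lookup-or-allocate over the table above. */
static int example_register_mac(struct mlx4_mac_table *table, u64 mac, int *index)
{
	int i, free = -1;

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; ++i) {
		if (free < 0 && !table->refs[i]) {
			free = i;		/* remember first empty slot */
			continue;
		}
		if (table->refs[i] &&
		    table->entries[i] == cpu_to_be64(mac)) {
			++table->refs[i];	/* already present: share it */
			*index = i;
			mutex_unlock(&table->mutex);
			return 0;
		}
	}
	if (free < 0 || table->total >= table->max) {
		mutex_unlock(&table->mutex);
		return -ENOSPC;
	}
	table->entries[free] = cpu_to_be64(mac);
	table->refs[free] = 1;
	++table->total;
	*index = free;
	/* a real implementation would now push the table to firmware via
	 * a SET_PORT command; see the opcode modifiers further down */
	mutex_unlock(&table->mutex);
	return 0;
}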
[diff collapsed]
...@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) ...@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
int err; int err;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws); ~0, dev->caps.reserved_mrws, 0);
if (err) if (err)
return err; return err;
......
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);

 	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-				(1 << 24) - 1, dev->caps.reserved_pds);
+				(1 << 24) - 1, dev->caps.reserved_pds, 0);
 }

 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
-				max(128, dev->caps.reserved_uars));
+				max(128, dev->caps.reserved_uars), 0);
 }

 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
...
[diff collapsed]
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);

-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	int qpn;
+
+	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+	if (qpn == -1)
+		return -ENOMEM;
+
+	*base = qpn;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+	if (base_qpn < dev->caps.sqp_start + 8)
+		return;
+
+	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
 	int err;

-	if (sqpn)
-		qp->qpn = sqpn;
-	else {
-		qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
-		if (qp->qpn == -1)
-			return -ENOMEM;
-	}
+	if (!qpn)
+		return -EINVAL;
+
+	qp->qpn = qpn;

 	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
 	if (err)
@@ -208,9 +231,6 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
 	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

 err_out:
-	if (!sqpn)
-		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
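mlx4_qp_alloc() no longer draws from the QPN bitmap itself: callers first reserve a (possibly aligned) block of QP numbers, then instantiate QPs at explicit numbers inside it. A usage sketch under that contract; the wrapper function, the count of 4, and the trimmed error handling are illustrative:

/* Sketch only: reserve an aligned QPN block (e.g. for an RSS indirection
 * set), create QPs at explicit numbers inside it, release on failure. */
static int example_reserve_and_create(struct mlx4_dev *dev)
{
	struct mlx4_qp qps[4];
	int base, i, err;

	err = mlx4_qp_reserve_range(dev, 4, 4, &base);	/* 4 QPNs, 4-aligned */
	if (err)
		return err;

	for (i = 0; i < 4; ++i) {
		err = mlx4_qp_alloc(dev, base + i, &qps[i]);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		mlx4_qp_free(dev, &qps[i]);
	mlx4_qp_release_range(dev, base, 4);
	return err;
}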
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
 	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
 	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
-	if (qp->qpn >= dev->caps.sqp_start + 8)
-		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);

@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	int err;
+	int reserved_from_top = 0;

 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 	 * block of special QPs must be aligned to a multiple of 8, so
 	 * round up.
 	 */
-	dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+	dev->caps.sqp_start =
+		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+	{
+		int sort[MLX4_NUM_QP_REGION];
+		int i, j, tmp;
+		int last_base = dev->caps.num_qps;
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+			sort[i] = i;
+
+		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+			for (j = 2; j < i; ++j) {
+				if (dev->caps.reserved_qps_cnt[sort[j]] >
+				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+					tmp = sort[j];
+					sort[j] = sort[j - 1];
+					sort[j - 1] = tmp;
+				}
+			}
+		}
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+			dev->caps.reserved_qps_base[sort[i]] = last_base;
+			reserved_from_top +=
+				dev->caps.reserved_qps_cnt[sort[i]];
+		}
+	}
+
 	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-			       (1 << 24) - 1, dev->caps.sqp_start + 8);
+			       (1 << 23) - 1, dev->caps.sqp_start + 8,
+			       reserved_from_top);
 	if (err)
 		return err;
...
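The block above sorts the non-firmware QP regions by size, descending, and carves each one off the top of the QP number space, accumulating reserved_from_top so the general-purpose bitmap never hands those numbers out. To make the carving concrete, a standalone arithmetic check with made-up region sizes (the counts 128 and 64 and num_qps = 131072 are illustrative, not driver values):

#include <stdio.h>

/* Standalone check of the top-down carving: each region's base is the
 * previous base minus its size, and reserved_from_top is their sum. */
int main(void)
{
	int sizes[] = { 128, 64 };	/* already in descending order */
	int last_base = 131072, reserved_from_top = 0;
	int i;

	for (i = 0; i < 2; ++i) {
		last_base -= sizes[i];
		reserved_from_top += sizes[i];
		printf("region %d: base %d (count %d)\n", i, last_base, sizes[i]);
	}
	/* prints bases 130944 and 130880; reserved_from_top = 192 */
	printf("reserved_from_top = %d\n", reserved_from_top);
	return 0;
}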
...@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev) ...@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs); dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
if (err) if (err)
return err; return err;
......
...@@ -132,6 +132,15 @@ enum { ...@@ -132,6 +132,15 @@ enum {
MLX4_MAILBOX_SIZE = 4096 MLX4_MAILBOX_SIZE = 4096
}; };
enum {
/* set port opcode modifiers */
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
};
struct mlx4_dev; struct mlx4_dev;
struct mlx4_cmd_mailbox { struct mlx4_cmd_mailbox {
......
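These modifiers select which SET_PORT sub-operation a mailbox carries. A hedged sketch of the command pattern, assuming the driver's existing mailbox helpers and the 2008-era six-argument mlx4_cmd(); the encoding of port and table selector in in_modifier, and op_modifier = 1 for the Ethernet variant, are assumptions here, since the table-programming code itself sits in a collapsed diff:

/* Illustrative only: push a port's MAC table to firmware. */
static int example_set_mac_table(struct mlx4_dev *dev, u8 port,
				 struct mlx4_mac_table *table)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, table->entries, MLX4_MAC_TABLE_SIZE);

	/* assumed encoding: table selector in the high bits, port in the low */
	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}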
[diff collapsed]