Commit 5ae2866f authored by Roland Dreier's avatar Roland Dreier

Merge branches 'cxgb4', 'misc', 'mlx5' and 'qib' into for-next

...@@ -282,6 +282,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, ...@@ -282,6 +282,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->sig_guard_cap = IB_GUARD_T10DIF_CRC | props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
IB_GUARD_T10DIF_CSUM; IB_GUARD_T10DIF_CSUM;
} }
if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
0xffffff; 0xffffff;
......
...@@ -807,6 +807,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -807,6 +807,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock); spin_lock_init(&qp->rq.lock);
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
return -EINVAL;
} else {
qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
}
}
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
...@@ -878,6 +887,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -878,6 +887,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (qp->wq_sig) if (qp->wq_sig)
in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) { if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
int rcqe_sz; int rcqe_sz;
int scqe_sz; int scqe_sz;
......
...@@ -858,13 +858,9 @@ static int mthca_enable_msi_x(struct mthca_dev *mdev) ...@@ -858,13 +858,9 @@ static int mthca_enable_msi_x(struct mthca_dev *mdev)
entries[1].entry = 1; entries[1].entry = 1;
entries[2].entry = 2; entries[2].entry = 2;
err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries)); err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries));
if (err) { if (err)
if (err > 0)
mthca_info(mdev, "Only %d MSI-X vectors available, "
"not using MSI-X\n", err);
return err; return err;
}
mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector; mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector; mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
......
...@@ -197,46 +197,47 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, ...@@ -197,46 +197,47 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
struct qib_msix_entry *qib_msix_entry) struct qib_msix_entry *qib_msix_entry)
{ {
int ret; int ret;
u32 tabsize = 0; int nvec = *msixcnt;
u16 msix_flags;
struct msix_entry *msix_entry; struct msix_entry *msix_entry;
int i; int i;
ret = pci_msix_vec_count(dd->pcidev);
if (ret < 0)
goto do_intx;
nvec = min(nvec, ret);
/* We can't pass qib_msix_entry array to qib_msix_setup /* We can't pass qib_msix_entry array to qib_msix_setup
* so use a dummy msix_entry array and copy the allocated * so use a dummy msix_entry array and copy the allocated
* irq back to the qib_msix_entry array. */ * irq back to the qib_msix_entry array. */
msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL); msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry) { if (!msix_entry)
ret = -ENOMEM;
goto do_intx; goto do_intx;
}
for (i = 0; i < *msixcnt; i++) for (i = 0; i < nvec; i++)
msix_entry[i] = qib_msix_entry[i].msix; msix_entry[i] = qib_msix_entry[i].msix;
pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); if (ret < 0)
if (tabsize > *msixcnt) goto free_msix_entry;
tabsize = *msixcnt; else
ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); nvec = ret;
if (ret > 0) {
tabsize = ret; for (i = 0; i < nvec; i++)
ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
}
do_intx:
if (ret) {
qib_dev_err(dd,
"pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
tabsize, ret);
tabsize = 0;
}
for (i = 0; i < tabsize; i++)
qib_msix_entry[i].msix = msix_entry[i]; qib_msix_entry[i].msix = msix_entry[i];
kfree(msix_entry); kfree(msix_entry);
*msixcnt = tabsize; *msixcnt = nvec;
return;
if (ret) free_msix_entry:
qib_enable_intx(dd->pcidev); kfree(msix_entry);
do_intx:
qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
"falling back to INTx\n", nvec, ret);
*msixcnt = 0;
qib_enable_intx(dd->pcidev);
} }
/** /**
......
...@@ -179,6 +179,7 @@ enum { ...@@ -179,6 +179,7 @@ enum {
MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
......
...@@ -146,6 +146,7 @@ enum { ...@@ -146,6 +146,7 @@ enum {
enum { enum {
MLX5_QP_LAT_SENSITIVE = 1 << 28, MLX5_QP_LAT_SENSITIVE = 1 << 28,
MLX5_QP_BLOCK_MCAST = 1 << 30,
MLX5_QP_ENABLE_SIG = 1 << 31, MLX5_QP_ENABLE_SIG = 1 << 31,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment