Commit d310c4bf authored by Michael J. Ruhl, committed by Jason Gunthorpe

IB/{rdmavt, hfi1, qib}: Remove AH refcount for UD QPs

Historically, rdmavt destroy_ah() has returned -EBUSY when the AH has a
non-zero reference count.  IBTA 11.2.2 lists no such return value or error
case:

	Output Modifiers:
	- Verb results:
	- Operation completed successfully.
	- Invalid HCA handle.
	- Invalid address handle.

ULPs never test for this error, so the AH memory is leaked.

The reference count exists to allow driver-independent progress
mechanisms to process UD SWQEs in parallel with post sends.  The SWQE
holds a reference until the UD SWQE completes, and then drops the
reference.

Fix by removing the need to reference count the AH.  Add a UD-specific
allocation to each SWQE entry that caches the information needed for
independent progress, and copy that information during post send
processing.
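
In outline, the replacement pattern looks like this (a condensed sketch of
the hunks below; all identifiers are taken from the patch itself):

	/* Each UD SWQE now carries its own copy of the AH attributes, so
	 * the AH can be destroyed while UD sends are still in flight.
	 */
	struct rvt_ud_wr {
		struct ib_ud_wr wr;        /* must stay first; see rvt_post_one_wr() */
		struct rdma_ah_attr *attr; /* per-SWQE AH attribute cache */
	};

	/* Post send: snapshot the AH state into the SWQE ... */
	rdma_copy_ah_attr(wqe->ud_wr.attr, &ibah_to_rvtah(wqe->ud_wr.wr.ah)->attr);

	/* ... SWQE completion: release the copy instead of dropping a refcount. */
	rdma_destroy_ah_attr(wqe->ud_wr.attr);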
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent fe2ac047
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -348,7 +348,7 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 		break;
 	case IB_QPT_GSI:
 	case IB_QPT_UD:
-		ah = ibah_to_rvtah(wqe->ud_wr.ah);
+		ah = ibah_to_rvtah(wqe->ud_wr.wr.ah);
 		if (wqe->length > (1 << ah->log_pmtu))
 			return -EINVAL;
 		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -87,7 +87,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	rcu_read_lock();
 	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
-			    swqe->ud_wr.remote_qpn);
+			    swqe->ud_wr.wr.remote_qpn);
 	if (!qp) {
 		ibp->rvp.n_pkt_drops++;
 		rcu_read_unlock();
@@ -105,7 +105,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 		goto drop;
 	}

-	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
+	ah_attr = swqe->ud_wr.attr;
 	ppd = ppd_from_ibp(ibp);

 	if (qp->ibqp.qp_num > 1) {
@@ -135,8 +135,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	if (qp->ibqp.qp_num) {
 		u32 qkey;

-		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
-			sqp->qkey : swqe->ud_wr.remote_qkey;
+		qkey = (int)swqe->ud_wr.wr.remote_qkey < 0 ?
+			sqp->qkey : swqe->ud_wr.wr.remote_qkey;
 		if (unlikely(qkey != qp->qkey))
 			goto drop; /* silently drop per IBTA spec */
 	}
@@ -240,7 +240,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
 		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
 		    sqp->ibqp.qp_type == IB_QPT_SMI)
-			wc.pkey_index = swqe->ud_wr.pkey_index;
+			wc.pkey_index = swqe->ud_wr.wr.pkey_index;
 		else
 			wc.pkey_index = sqp->s_pkey_index;
 	} else {
@@ -282,20 +282,20 @@ static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
 		bth0 |= IB_BTH_SOLICITED;
 	bth0 |= extra_bytes << 20;
 	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
-		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
+		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.wr.pkey_index);
 	else
 		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
 	if (!bypass)
 		bth0 |= *pkey;
 	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
+	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.wr.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
 	/*
 	 * Qkeys with the high order bit set mean use the
 	 * qkey from the QP context instead of the WR (see 10.2.5).
 	 */
-	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
-					 qp->qkey : wqe->ud_wr.remote_qkey);
+	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.wr.remote_qkey < 0 ?
+					 qp->qkey : wqe->ud_wr.wr.remote_qkey);
 	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
 }
@@ -315,7 +315,7 @@ void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	ppd = ppd_from_ibp(ibp);
-	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+	ah_attr = wqe->ud_wr.attr;

 	extra_bytes = -wqe->length & 3;
 	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
@@ -379,7 +379,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	struct hfi1_pportdata *ppd;
 	struct hfi1_ibport *ibp;
 	u32 dlid, slid, nwords, extra_bytes;
-	u32 dest_qp = wqe->ud_wr.remote_qpn;
+	u32 dest_qp = wqe->ud_wr.wr.remote_qpn;
 	u32 src_qp = qp->ibqp.qp_num;
 	u16 len, pkey;
 	u8 l4, sc5;
@@ -387,7 +387,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	ppd = ppd_from_ibp(ibp);
-	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+	ah_attr = wqe->ud_wr.attr;

 	/*
 	 * Build 16B Management Packet if either the destination
@@ -449,7 +449,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	if (is_mgmt) {
 		l4 = OPA_16B_L4_FM;
-		pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
+		pkey = hfi1_get_pkey(ibp, wqe->ud_wr.wr.pkey_index);
 		hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
 				 dest_qp, src_qp);
 	} else {
@@ -514,7 +514,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	/* Construct the header. */
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	ppd = ppd_from_ibp(ibp);
-	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+	ah_attr = wqe->ud_wr.attr;
 	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
 	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
 	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation.  * All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -398,7 +398,7 @@ int qib_check_send_wqe(struct rvt_qp *qp,
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	case IB_QPT_UD:
-		ah = ibah_to_rvtah(wqe->ud_wr.ah);
+		ah = ibah_to_rvtah(wqe->ud_wr.wr.ah);
 		if (wqe->length > (1 << ah->log_pmtu))
 			return -EINVAL;
 		/* progress hint */
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -63,7 +64,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	enum ib_qp_type sqptype, dqptype;

 	rcu_read_lock();
-	qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
+	qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.wr.remote_qpn);
 	if (!qp) {
 		ibp->rvp.n_pkt_drops++;
 		goto drop;
@@ -80,7 +81,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 		goto drop;
 	}

-	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
+	ah_attr = swqe->ud_wr.attr;
 	ppd = ppd_from_ibp(ibp);

 	if (qp->ibqp.qp_num > 1) {
@@ -110,8 +111,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	if (qp->ibqp.qp_num) {
 		u32 qkey;

-		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
-			sqp->qkey : swqe->ud_wr.remote_qkey;
+		qkey = (int)swqe->ud_wr.wr.remote_qkey < 0 ?
+			sqp->qkey : swqe->ud_wr.wr.remote_qkey;
 		if (unlikely(qkey != qp->qkey))
 			goto drop;
 	}
@@ -203,7 +204,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	wc.qp = &qp->ibqp;
 	wc.src_qp = sqp->ibqp.qp_num;
 	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
-		swqe->ud_wr.pkey_index : 0;
+		swqe->ud_wr.wr.pkey_index : 0;
 	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
 				((1 << ppd->lmc) - 1));
 	wc.sl = rdma_ah_get_sl(ah_attr);
@@ -270,7 +271,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 	/* Construct the header. */
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	ppd = ppd_from_ibp(ibp);
-	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+	ah_attr = wqe->ud_wr.attr;
 	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
 		if (rdma_ah_get_dlid(ah_attr) !=
 				be16_to_cpu(IB_LID_PERMISSIVE))
@@ -362,7 +363,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 	bth0 |= extra_bytes << 20;
 	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
 		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
-			     wqe->ud_wr.pkey_index : qp->s_pkey_index);
+			     wqe->ud_wr.wr.pkey_index : qp->s_pkey_index);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	/*
 	 * Use the multicast QP if the destination LID is a multicast LID.
@@ -371,14 +372,14 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 		be16_to_cpu(IB_MULTICAST_LID_BASE) &&
 	    rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
 		cpu_to_be32(QIB_MULTICAST_QPN) :
-		cpu_to_be32(wqe->ud_wr.remote_qpn);
+		cpu_to_be32(wqe->ud_wr.wr.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
 	/*
 	 * Qkeys with the high order bit set mean use the
 	 * qkey from the QP context instead of the WR (see 10.2.5).
 	 */
-	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
-					 qp->qkey : wqe->ud_wr.remote_qkey);
+	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.wr.remote_qkey < 0 ?
+					 qp->qkey : wqe->ud_wr.wr.remote_qkey);
 	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

 done:
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -119,8 +119,6 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
 	rdma_copy_ah_attr(&ah->attr, ah_attr);

-	atomic_set(&ah->refcount, 0);
-
 	if (dev->driver_f.notify_new_ah)
 		dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
@@ -141,8 +139,6 @@ void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
 	struct rvt_ah *ah = ibah_to_rvtah(ibah);
 	unsigned long flags;

-	WARN_ON_ONCE(atomic_read(&ah->refcount));
-
 	spin_lock_irqsave(&dev->n_ahs_lock, flags);
 	dev->n_ahs_allocated--;
 	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -978,6 +978,51 @@ static u8 get_allowed_ops(enum ib_qp_type type)
 		IB_OPCODE_UC : IB_OPCODE_UD;
 }

+/**
+ * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
+ * @qp: Valid QP with allowed_ops set
+ *
+ * The rvt_swqe data structure being used is a union, so this is
+ * only valid for UD QPs.
+ */
+static void free_ud_wq_attr(struct rvt_qp *qp)
+{
+	struct rvt_swqe *wqe;
+	int i;
+
+	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
+		wqe = rvt_get_swqe_ptr(qp, i);
+		kfree(wqe->ud_wr.attr);
+		wqe->ud_wr.attr = NULL;
+	}
+}
+
+/**
+ * alloc_ud_wq_attr - AH attribute cache for UD QPs
+ * @qp: Valid QP with allowed_ops set
+ * @node: Numa node for allocation
+ *
+ * The rvt_swqe data structure being used is a union, so this is
+ * only valid for UD QPs.
+ */
+static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
+{
+	struct rvt_swqe *wqe;
+	int i;
+
+	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
+		wqe = rvt_get_swqe_ptr(qp, i);
+		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
+					       GFP_KERNEL, node);
+		if (!wqe->ud_wr.attr) {
+			free_ud_wq_attr(qp);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * rvt_create_qp - create a queue pair for a device
  * @ibpd: the protection domain who's device we create the queue pair for
@@ -1124,6 +1169,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		qp->s_max_sge = init_attr->cap.max_send_sge;
 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
 			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
+		err = alloc_ud_wq_attr(qp, rdi->dparms.node);
+		if (err) {
+			ret = (ERR_PTR(err));
+			goto bail_driver_priv;
+		}

 		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
 				init_attr->qp_type,
@@ -1227,6 +1277,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,

 bail_rq_wq:
 	rvt_free_rq(&qp->r_rq);
+	free_ud_wq_attr(qp);

 bail_driver_priv:
 	rdi->driver_f.qp_priv_free(rdi, qp);
@@ -1671,6 +1722,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	kfree(qp->s_ack_queue);
 	rdma_destroy_ah_attr(&qp->remote_ah_attr);
 	rdma_destroy_ah_attr(&qp->alt_ah_attr);
+	free_ud_wq_attr(qp);
 	vfree(qp->s_wq);
 	kfree(qp);
 	return 0;
@@ -2037,10 +2089,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	 */
 	log_pmtu = qp->log_pmtu;
 	if (qp->allowed_ops == IB_OPCODE_UD) {
-		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
+		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.wr.ah);

 		log_pmtu = ah->log_pmtu;
-		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
+		rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
 	}

 	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
@@ -2085,7 +2137,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,

 bail_inval_free_ref:
 	if (qp->allowed_ops == IB_OPCODE_UD)
-		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
+		rdma_destroy_ah_attr(wqe->ud_wr.attr);
 bail_inval_free:
 	/* release mr holds */
 	while (j) {
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -2,7 +2,7 @@
 #define DEF_RDMA_VT_H

 /*
- * Copyright(c) 2016 - 2018 Intel Corporation.
+ * Copyright(c) 2016 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -202,7 +202,6 @@ struct rvt_pd {
 struct rvt_ah {
 	struct ib_ah ibah;
 	struct rdma_ah_attr attr;
-	atomic_t refcount;
 	u8 vl;
 	u8 log_pmtu;
 };
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
 #define DEF_RDMAVT_INCQP_H

 /*
- * Copyright(c) 2016 - 2018 Intel Corporation.
+ * Copyright(c) 2016 - 2019 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -157,6 +157,22 @@
 #define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
 #define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

+/**
+ * rvt_ud_wr - IB UD work plus AH cache
+ * @wr: valid IB work request
+ * @attr: pointer to an allocated AH attribute
+ *
+ * Special case the UD WR so we can keep track of the AH attributes.
+ *
+ * NOTE: This data structure is strictly ordered wr then attr. I.e. the attr
+ * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
+ * The copy assumes that wr is first.
+ */
+struct rvt_ud_wr {
+	struct ib_ud_wr wr;
+	struct rdma_ah_attr *attr;
+};
+
 /*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
@@ -165,7 +181,7 @@
 struct rvt_swqe {
 	union {
 		struct ib_send_wr wr;   /* don't use wr.sg_list */
-		struct ib_ud_wr ud_wr;
+		struct rvt_ud_wr ud_wr;
 		struct ib_reg_wr reg_wr;
 		struct ib_rdma_wr rdma_wr;
 		struct ib_atomic_wr atomic_wr;
@@ -700,7 +716,7 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
 {
 	rvt_put_swqe(wqe);
 	if (qp->allowed_ops == IB_OPCODE_UD)
-		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+		rdma_destroy_ah_attr(wqe->ud_wr.attr);
 }

 /**