Commit 12137c59 authored by Stefan Roscher, committed by Roland Dreier

IB/ehca: Wait for async events to finish before destroying QP

This is necessary because, in a multicore environment, a race between
uverbs async handler and destroy QP could occur.

Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent ab69b3cf
...@@ -192,6 +192,8 @@ struct ehca_qp { ...@@ -192,6 +192,8 @@ struct ehca_qp {
int mtu_shift; int mtu_shift;
u32 message_count; u32 message_count;
u32 packet_count; u32 packet_count;
atomic_t nr_events; /* events seen */
wait_queue_head_t wait_completion;
}; };
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
......
...@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe, ...@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
read_lock(&ehca_qp_idr_lock); read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, token); qp = idr_find(&ehca_qp_idr, token);
if (qp)
atomic_inc(&qp->nr_events);
read_unlock(&ehca_qp_idr_lock); read_unlock(&ehca_qp_idr_lock);
if (!qp) if (!qp)
...@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe, ...@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
if (fatal && qp->ext_type == EQPT_SRQBASE) if (fatal && qp->ext_type == EQPT_SRQBASE)
dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED); dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
if (atomic_dec_and_test(&qp->nr_events))
wake_up(&qp->wait_completion);
return; return;
} }
......
...@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp( ...@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
atomic_set(&my_qp->nr_events, 0);
init_waitqueue_head(&my_qp->wait_completion);
spin_lock_init(&my_qp->spinlock_s); spin_lock_init(&my_qp->spinlock_s);
spin_lock_init(&my_qp->spinlock_r); spin_lock_init(&my_qp->spinlock_r);
my_qp->qp_type = qp_type; my_qp->qp_type = qp_type;
...@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, ...@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
idr_remove(&ehca_qp_idr, my_qp->token); idr_remove(&ehca_qp_idr, my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags); write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
/* now wait until all pending events have completed */
wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) { if (h_ret != H_SUCCESS) {
ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment