Commit 7b3256e3 authored by Dean Luick, committed by Doug Ledford

IB/hfi1: Fix user SDMA racy user request claim

The user SDMA in-use claim bit is in the structure that gets zeroed out
once the claim is made.  Move the request in-use flag into its own bit
array and use that for atomic claims.  This cleans up the claim code and
removes any race possibility.
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 9da7e9a7
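
Note on the pattern used by this fix: the old code performed a check-then-act claim; it read SDMA_REQ_IN_USE from the request struct, and only set the bit after the struct had been memset() to zero, so the claim could be lost and two submitters could pass the test. The new code keeps one bit per completion-ring entry in a bitmap that lives outside the request structs and claims it with a single atomic test_and_set_bit(). Below is a minimal user-space sketch of the same claim/release idea, using C11 atomics in place of the kernel bitmap helpers; the ring size and the claim_slot()/release_slot() names are illustrative, not taken from the driver.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 128   /* stand-in for hfi1_sdma_comp_ring_size; illustrative only */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITMAP_LONGS ((RING_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One bit per ring slot, kept outside the request structs so that zeroing
 * a request can never wipe out its own in-use claim. */
static _Atomic unsigned long req_in_use[BITMAP_LONGS];

/* Atomically claim slot 'idx'; returns false if it was already taken.
 * atomic_fetch_or() returns the word's previous value, so the caller can
 * tell whether the bit was set before this call (test_and_set_bit analogue). */
static bool claim_slot(unsigned int idx)
{
        unsigned long mask = 1UL << (idx % BITS_PER_LONG);
        unsigned long old = atomic_fetch_or(&req_in_use[idx / BITS_PER_LONG], mask);

        return !(old & mask);
}

/* Release the slot when the request completes (clear_bit analogue). */
static void release_slot(unsigned int idx)
{
        unsigned long mask = 1UL << (idx % BITS_PER_LONG);

        atomic_fetch_and(&req_in_use[idx / BITS_PER_LONG], ~mask);
}

int main(void)
{
        printf("first claim of slot 5:  %d\n", claim_slot(5));  /* 1: success */
        printf("second claim of slot 5: %d\n", claim_slot(5));  /* 0: already in use */
        release_slot(5);
        printf("claim after release:    %d\n", claim_slot(5));  /* 1: success again */
        return 0;
}

The check and the claim happen in one atomic step, and because the bitmap is separate from struct user_sdma_request, the later memset() of the request can no longer clear the claim.
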
@@ -145,7 +145,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 /* Last packet in the request */
 #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)

-#define SDMA_REQ_IN_USE 0
+/* SDMA request flag bits */
 #define SDMA_REQ_FOR_THREAD 1
 #define SDMA_REQ_SEND_DONE 2
 #define SDMA_REQ_HAVE_AHG 3
@@ -397,6 +397,11 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
         if (!pq->reqs)
                 goto pq_reqs_nomem;

+        memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
+        pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
+        if (!pq->req_in_use)
+                goto pq_reqs_no_in_use;
+
         INIT_LIST_HEAD(&pq->list);
         pq->dd = dd;
         pq->ctxt = uctxt->ctxt;
@@ -453,6 +458,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
 cq_nomem:
         kmem_cache_destroy(pq->txreq_cache);
 pq_txreq_nomem:
+        kfree(pq->req_in_use);
+pq_reqs_no_in_use:
         kfree(pq->reqs);
 pq_reqs_nomem:
         kfree(pq);
@@ -484,6 +491,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
                         pq->wait,
                         (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
                 kfree(pq->reqs);
+                kfree(pq->req_in_use);
                 kmem_cache_destroy(pq->txreq_cache);
                 kfree(pq);
                 fd->pq = NULL;
@@ -572,29 +580,27 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
                 return -EINVAL;
         }

-        if (cq->comps[info.comp_idx].status == QUEUED ||
-            test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
-                hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
-                          dd->unit, uctxt->ctxt, fd->subctxt,
-                          info.comp_idx);
-                return -EBADSLT;
-        }
         if (!info.fragsize) {
                 hfi1_cdbg(SDMA,
                           "[%u:%u:%u:%u] Request does not specify fragsize",
                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
                 return -EINVAL;
         }
+
+        /* Try to claim the request. */
+        if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
+                hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
+                          dd->unit, uctxt->ctxt, fd->subctxt,
+                          info.comp_idx);
+                return -EBADSLT;
+        }
         /*
-         * We've done all the safety checks that we can up to this point,
-         * "allocate" the request entry.
+         * All safety checks have been done and this request has been claimed.
         */
         hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
                   uctxt->ctxt, fd->subctxt, info.comp_idx);
         req = pq->reqs + info.comp_idx;
         memset(req, 0, sizeof(*req));
-        /* Mark the request as IN_USE before we start filling it in. */
-        set_bit(SDMA_REQ_IN_USE, &req->flags);
+
         req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
         req->pq = pq;
         req->cq = cq;
@@ -1612,7 +1618,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
                 }
         }
         kfree(req->tids);
-        clear_bit(SDMA_REQ_IN_USE, &req->flags);
+        clear_bit(req->info.comp_idx, req->pq->req_in_use);
 }

 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,

@@ -63,6 +63,7 @@ struct hfi1_user_sdma_pkt_q {
         struct hfi1_devdata *dd;
         struct kmem_cache *txreq_cache;
         struct user_sdma_request *reqs;
+        unsigned long *req_in_use;
         struct iowait busy;
         unsigned state;
         wait_queue_head_t wait;