Commit f04d5b3d authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Isolate mr code from atomic_reply()

Isolate the mr specific code from atomic_reply() in rxe_resp.c into
a subroutine rxe_mr_do_atomic_op() in rxe_mr.c.
Minor cleanups to mr_check_range() and iova_to_vaddr().
Move enum resp_states to rxe.h.

Link: https://lore.kernel.org/r/20230119235936.19728-4-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent db4729a5
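In short: the responder's open-coded atomic handling moves behind one
mr-layer helper. Its contract, per the prototype added to rxe_loc.h
below (the explanatory comment here is mine, not part of the patch):

	/* Perform an atomic Compare & Swap or Fetch & Add on the u64 at
	 * iova. On success, return 0 and pass the original value back
	 * through orig_val; on failure, return a RESPST_ERR_* responder
	 * state that the caller can act on directly.
	 */
	int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
				u64 compare, u64 swap_add, u64 *orig_val);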
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,44 @@
 #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,		\
 		"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
 
+/* responder states */
+enum resp_states {
+	RESPST_NONE,
+	RESPST_GET_REQ,
+	RESPST_CHK_PSN,
+	RESPST_CHK_OP_SEQ,
+	RESPST_CHK_OP_VALID,
+	RESPST_CHK_RESOURCE,
+	RESPST_CHK_LENGTH,
+	RESPST_CHK_RKEY,
+	RESPST_EXECUTE,
+	RESPST_READ_REPLY,
+	RESPST_ATOMIC_REPLY,
+	RESPST_ATOMIC_WRITE_REPLY,
+	RESPST_PROCESS_FLUSH,
+	RESPST_COMPLETE,
+	RESPST_ACKNOWLEDGE,
+	RESPST_CLEANUP,
+	RESPST_DUPLICATE_REQUEST,
+	RESPST_ERR_MALFORMED_WQE,
+	RESPST_ERR_UNSUPPORTED_OPCODE,
+	RESPST_ERR_MISALIGNED_ATOMIC,
+	RESPST_ERR_PSN_OUT_OF_SEQ,
+	RESPST_ERR_MISSING_OPCODE_FIRST,
+	RESPST_ERR_MISSING_OPCODE_LAST_C,
+	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+	RESPST_ERR_RNR,
+	RESPST_ERR_RKEY_VIOLATION,
+	RESPST_ERR_INVALIDATE_RKEY,
+	RESPST_ERR_LENGTH,
+	RESPST_ERR_CQ_OVERFLOW,
+	RESPST_ERROR,
+	RESPST_RESET,
+	RESPST_DONE,
+	RESPST_EXIT,
+};
+
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
 
 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -72,6 +72,8 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset);
 void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+			u64 compare, u64 swap_add, u64 *orig_val);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
 			 enum rxe_mr_lookup_type type);
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -32,13 +32,15 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 	case IB_MR_TYPE_USER:
 	case IB_MR_TYPE_MEM_REG:
-		if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
-		    iova > mr->ibmr.iova + mr->ibmr.length - length)
-			return -EFAULT;
+		if (iova < mr->ibmr.iova ||
+		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
+			rxe_dbg_mr(mr, "iova/length out of range");
+			return -EINVAL;
+		}
 		return 0;
 
 	default:
-		rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
+		rxe_dbg_mr(mr, "mr type not supported\n");
 		return -EINVAL;
 	}
 }
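The rewritten bounds test reads as plain interval arithmetic: the access
[iova, iova + length) must lie inside [mr->ibmr.iova, mr->ibmr.iova +
mr->ibmr.length). A minimal stand-alone sketch of that check (the helper
name and user-space types are mine, for illustration only):

	#include <stdbool.h>
	#include <stdint.h>

	/* hypothetical user-space version of the simplified check */
	static bool range_ok(uint64_t iova, uint64_t length,
			     uint64_t mr_iova, uint64_t mr_length)
	{
		return iova >= mr_iova &&
		       iova + length <= mr_iova + mr_length;
	}

For an mr covering [0x1000, 0x1100), range_ok(0x10f8, 8, 0x1000, 0x100)
holds, while range_ok(0x10f9, 8, 0x1000, 0x100) does not.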
@@ -299,37 +301,22 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 {
 	size_t offset;
 	int m, n;
-	void *addr;
 
-	if (mr->state != RXE_MR_STATE_VALID) {
-		rxe_dbg_mr(mr, "Not in valid state\n");
-		addr = NULL;
-		goto out;
-	}
+	if (mr->state != RXE_MR_STATE_VALID)
+		return NULL;
 
-	if (!mr->map) {
-		addr = (void *)(uintptr_t)iova;
-		goto out;
-	}
+	if (mr->ibmr.type == IB_MR_TYPE_DMA)
+		return (void *)(uintptr_t)iova;
 
-	if (mr_check_range(mr, iova, length)) {
-		rxe_dbg_mr(mr, "Range violation\n");
-		addr = NULL;
-		goto out;
-	}
+	if (mr_check_range(mr, iova, length))
+		return NULL;
 
 	lookup_iova(mr, iova, &m, &n, &offset);
 
-	if (offset + length > mr->map[m]->buf[n].size) {
-		rxe_dbg_mr(mr, "Crosses page boundary\n");
-		addr = NULL;
-		goto out;
-	}
+	if (offset + length > mr->map[m]->buf[n].size)
+		return NULL;
 
-	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
-
-out:
-	return addr;
+	return (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
 }
 
 int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
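The reworked iova_to_vaddr() is the same lookup with early returns: DMA
mrs translate the iova directly, everything else goes through the
mr->map[m]->buf[n] table and fails if the access would run past the
buffer found. A simplified model of that last step (illustrative only,
not the driver's lookup_iova()):

	#include <stddef.h>
	#include <stdint.h>

	struct buf { uint64_t addr; size_t size; };

	/* return a pointer into one buffer, or NULL if an access of the
	 * given length starting at offset would cross the buffer's end,
	 * mirroring the "offset + length > buf[n].size" test above */
	static void *buf_to_vaddr(const struct buf *b, size_t offset,
				  size_t length)
	{
		if (offset + length > b->size)
			return NULL;
		return (void *)(uintptr_t)(b->addr + offset);
	}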
@@ -538,6 +525,46 @@ int copy_data(
 	return err;
 }
 
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+			u64 compare, u64 swap_add, u64 *orig_val)
+{
+	u64 *va;
+	u64 value;
+
+	if (mr->state != RXE_MR_STATE_VALID) {
+		rxe_dbg_mr(mr, "mr not in valid state");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	va = iova_to_vaddr(mr, iova, sizeof(u64));
+	if (!va) {
+		rxe_dbg_mr(mr, "iova out of range");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	if ((uintptr_t)va & 0x7) {
+		rxe_dbg_mr(mr, "iova not aligned");
+		return RESPST_ERR_MISALIGNED_ATOMIC;
+	}
+
+	spin_lock_bh(&atomic_ops_lock);
+	value = *orig_val = *va;
+
+	if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+		if (value == compare)
+			*va = swap_add;
+	} else {
+		value += swap_add;
+		*va = value;
+	}
+
+	spin_unlock_bh(&atomic_ops_lock);
+
+	return 0;
+}
+
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 {
 	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
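The locked section implements the two RDMA atomic operations: Compare &
Swap stores swap_add only if the current value equals compare, Fetch &
Add stores value + swap_add, and both hand the original value back to
the caller. A user-space analog of just that section (a pthread mutex
standing in for the driver's spinlock; a sketch of the semantics, not
kernel code):

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* returns the original value, as the helper does via *orig_val */
	static uint64_t do_atomic(uint64_t *va, int is_cmp_swp,
				  uint64_t compare, uint64_t swap_add)
	{
		uint64_t orig;

		pthread_mutex_lock(&lock);
		orig = *va;
		if (is_cmp_swp) {
			if (orig == compare)
				*va = swap_add;		/* Compare & Swap */
		} else {
			*va = orig + swap_add;		/* Fetch & Add */
		}
		pthread_mutex_unlock(&lock);

		return orig;
	}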
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -10,43 +10,6 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
-enum resp_states {
-	RESPST_NONE,
-	RESPST_GET_REQ,
-	RESPST_CHK_PSN,
-	RESPST_CHK_OP_SEQ,
-	RESPST_CHK_OP_VALID,
-	RESPST_CHK_RESOURCE,
-	RESPST_CHK_LENGTH,
-	RESPST_CHK_RKEY,
-	RESPST_EXECUTE,
-	RESPST_READ_REPLY,
-	RESPST_ATOMIC_REPLY,
-	RESPST_ATOMIC_WRITE_REPLY,
-	RESPST_PROCESS_FLUSH,
-	RESPST_COMPLETE,
-	RESPST_ACKNOWLEDGE,
-	RESPST_CLEANUP,
-	RESPST_DUPLICATE_REQUEST,
-	RESPST_ERR_MALFORMED_WQE,
-	RESPST_ERR_UNSUPPORTED_OPCODE,
-	RESPST_ERR_MISALIGNED_ATOMIC,
-	RESPST_ERR_PSN_OUT_OF_SEQ,
-	RESPST_ERR_MISSING_OPCODE_FIRST,
-	RESPST_ERR_MISSING_OPCODE_LAST_C,
-	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
-	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
-	RESPST_ERR_RNR,
-	RESPST_ERR_RKEY_VIOLATION,
-	RESPST_ERR_INVALIDATE_RKEY,
-	RESPST_ERR_LENGTH,
-	RESPST_ERR_CQ_OVERFLOW,
-	RESPST_ERROR,
-	RESPST_RESET,
-	RESPST_DONE,
-	RESPST_EXIT,
-};
-
 static char *resp_state_name[] = {
 	[RESPST_NONE]				= "NONE",
 	[RESPST_GET_REQ]			= "GET_REQ",
@@ -725,17 +688,12 @@ static enum resp_states process_flush(struct rxe_qp *qp,
 	return RESPST_ACKNOWLEDGE;
 }
 
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
 static enum resp_states atomic_reply(struct rxe_qp *qp,
 				     struct rxe_pkt_info *pkt)
 {
-	u64 *vaddr;
-	enum resp_states ret;
 	struct rxe_mr *mr = qp->resp.mr;
 	struct resp_res *res = qp->resp.res;
-	u64 value;
+	int err;
 
 	if (!res) {
 		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
@@ -743,32 +701,14 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	}
 
 	if (!res->replay) {
-		if (mr->state != RXE_MR_STATE_VALID) {
-			ret = RESPST_ERR_RKEY_VIOLATION;
-			goto out;
-		}
-
-		vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
-					sizeof(u64));
-
-		/* check vaddr is 8 bytes aligned. */
-		if (!vaddr || (uintptr_t)vaddr & 7) {
-			ret = RESPST_ERR_MISALIGNED_ATOMIC;
-			goto out;
-		}
-
-		spin_lock_bh(&atomic_ops_lock);
-		res->atomic.orig_val = value = *vaddr;
-
-		if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
-			if (value == atmeth_comp(pkt))
-				value = atmeth_swap_add(pkt);
-		} else {
-			value += atmeth_swap_add(pkt);
-		}
-
-		*vaddr = value;
-		spin_unlock_bh(&atomic_ops_lock);
+		u64 iova = qp->resp.va + qp->resp.offset;
+
+		err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+					  atmeth_comp(pkt),
+					  atmeth_swap_add(pkt),
+					  &res->atomic.orig_val);
+		if (err)
+			return err;
 
 		qp->resp.msn++;
 
@@ -780,9 +720,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 		qp->resp.status = IB_WC_SUCCESS;
 	}
 
-	ret = RESPST_ACKNOWLEDGE;
-out:
-	return ret;
+	return RESPST_ACKNOWLEDGE;
 }
 
 #ifdef CONFIG_64BIT
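One design point worth noting: rxe_mr_do_atomic_op() reports failures as
enum resp_states values, so atomic_reply() can forward a nonzero err
unchanged and the responder state machine dispatches on it directly. A
runnable toy model of that pattern (pruned enum, names borrowed from the
patch purely for illustration):

	#include <stdio.h>

	/* 0 means success; any nonzero value is itself the next state */
	enum resp_states { RESPST_NONE, RESPST_ACKNOWLEDGE,
			   RESPST_ERR_RKEY_VIOLATION };

	static int do_op(int fail)
	{
		return fail ? RESPST_ERR_RKEY_VIOLATION : 0;
	}

	static enum resp_states reply(int fail)
	{
		int err = do_op(fail);

		if (err)
			return err;	/* forward the error state as-is */
		return RESPST_ACKNOWLEDGE;
	}

	int main(void)
	{
		printf("%d %d\n", reply(0), reply(1));	/* prints: 1 2 */
		return 0;
	}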