Commit 22c9cc24 authored by Max Gurtovoy, committed by Jason Gunthorpe

RDMA/rdmavt: Remove FMR memory registration

Use the FRWR method to register memory by default and remove the ancient
and unsafe FMR method.
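For reference, a minimal sketch of the FRWR flow that replaces FMR: memory
is registered by posting a work request to the QP rather than by calling a
dedicated verb. This is illustrative only and not part of this patch; the
helper name frwr_register and the access flags are assumptions, while
ib_alloc_mr(), ib_map_mr_sg(), and IB_WR_REG_MR are the existing in-kernel
verbs interface.

#include <rdma/ib_verbs.h>

/* Illustrative sketch only; frwr_register is a hypothetical helper. */
static int frwr_register(struct ib_qp *qp, struct ib_pd *pd,
			 struct scatterlist *sg, int nents)
{
	struct ib_reg_wr reg_wr = {};
	const struct ib_send_wr *bad_wr;
	struct ib_mr *mr;
	int n;

	/* Allocate an MR sized for nents pages (replaces ib_alloc_fmr). */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Load the page list into the MR (replaces ib_map_phys_fmr). */
	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Registration is posted to the QP as a REG_MR work request. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}

Invalidation, the analogue of unmap_fmr, is likewise posted as an
IB_WR_LOCAL_INV work request carrying the MR's rkey.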

Link: https://lore.kernel.org/r/10-v3-f58e6669d5d3+2cf-fmr_removal_jgg@mellanox.com
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Tested-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Acked-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d6747b37
@@ -713,160 +713,6 @@ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
}
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
* rvt_alloc_fmr - allocate a fast memory region
* @pd: the protection domain for this memory region
* @mr_access_flags: access flags for this memory region
* @fmr_attr: fast memory region attributes
*
* Return: the memory region on success, otherwise returns an errno.
*/
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
{
struct rvt_fmr *fmr;
int m;
struct ib_fmr *ret;
int rval = -ENOMEM;
/* Allocate struct plus pointers to first level page tables. */
m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
if (!fmr)
goto bail;
rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
PERCPU_REF_INIT_ATOMIC);
if (rval)
goto bail;
/*
* ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
* rkey.
*/
rval = rvt_alloc_lkey(&fmr->mr, 0);
if (rval)
goto bail_mregion;
fmr->ibfmr.rkey = fmr->mr.lkey;
fmr->ibfmr.lkey = fmr->mr.lkey;
/*
* Resources are allocated but no valid mapping (RKEY can't be
* used).
*/
fmr->mr.access_flags = mr_access_flags;
fmr->mr.max_segs = fmr_attr->max_pages;
fmr->mr.page_shift = fmr_attr->page_shift;
ret = &fmr->ibfmr;
done:
return ret;
bail_mregion:
rvt_deinit_mregion(&fmr->mr);
bail:
kfree(fmr);
ret = ERR_PTR(rval);
goto done;
}
/**
* rvt_map_phys_fmr - set up a fast memory region
* @ibfmr: the fast memory region to set up
* @page_list: the list of pages to associate with the fast memory region
* @list_len: the number of pages to associate with the fast memory region
* @iova: the virtual address of the start of the fast memory region
*
* This may be called from interrupt context.
*
* Return: 0 on success
*/
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int list_len, u64 iova)
{
struct rvt_fmr *fmr = to_ifmr(ibfmr);
struct rvt_lkey_table *rkt;
unsigned long flags;
int m, n;
unsigned long i;
u32 ps;
struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
i = atomic_long_read(&fmr->mr.refcount.count);
if (i > 2)
return -EBUSY;
if (list_len > fmr->mr.max_segs)
return -EINVAL;
rkt = &rdi->lkey_table;
spin_lock_irqsave(&rkt->lock, flags);
fmr->mr.user_base = iova;
fmr->mr.iova = iova;
ps = 1 << fmr->mr.page_shift;
fmr->mr.length = list_len * ps;
m = 0;
n = 0;
for (i = 0; i < list_len; i++) {
fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
if (++n == RVT_SEGSZ) {
m++;
n = 0;
}
}
spin_unlock_irqrestore(&rkt->lock, flags);
return 0;
}
/**
* rvt_unmap_fmr - unmap fast memory regions
* @fmr_list: the list of fast memory regions to unmap
*
* Return: 0 on success.
*/
int rvt_unmap_fmr(struct list_head *fmr_list)
{
struct rvt_fmr *fmr;
struct rvt_lkey_table *rkt;
unsigned long flags;
struct rvt_dev_info *rdi;
list_for_each_entry(fmr, fmr_list, ibfmr.list) {
rdi = ib_to_rvt(fmr->ibfmr.device);
rkt = &rdi->lkey_table;
spin_lock_irqsave(&rkt->lock, flags);
fmr->mr.user_base = 0;
fmr->mr.iova = 0;
fmr->mr.length = 0;
spin_unlock_irqrestore(&rkt->lock, flags);
}
return 0;
}
/**
* rvt_dealloc_fmr - deallocate a fast memory region
* @ibfmr: the fast memory region to deallocate
*
* Return: 0 on success.
*/
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
struct rvt_fmr *fmr = to_ifmr(ibfmr);
int ret = 0;
rvt_free_lkey(&fmr->mr);
rvt_put_mr(&fmr->mr); /* will set completion if last */
ret = rvt_check_refs(&fmr->mr, __func__);
if (ret)
goto out;
rvt_deinit_mregion(&fmr->mr);
kfree(fmr);
out:
return ret;
}
/**
* rvt_sge_adjacent - is isge compressible
* @last_sge: last outgoing SGE written
...
@@ -49,10 +49,6 @@
*/
#include <rdma/rdma_vt.h>
struct rvt_fmr {
struct ib_fmr ibfmr;
struct rvt_mregion mr; /* must be last */
};
struct rvt_mr {
struct ib_mr ibmr;
@@ -60,11 +56,6 @@ struct rvt_mr {
struct rvt_mregion mr; /* must be last */
};
static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
return container_of(ibfmr, struct rvt_fmr, ibfmr);
}
static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct rvt_mr, ibmr);
@@ -83,11 +74,5 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int list_len, u64 iova);
int rvt_unmap_fmr(struct list_head *fmr_list);
int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
#endif /* DEF_RVTMR_H */
@@ -378,7 +378,6 @@ enum {
static const struct ib_device_ops rvt_dev_ops = {
.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
.alloc_fmr = rvt_alloc_fmr,
.alloc_mr = rvt_alloc_mr,
.alloc_pd = rvt_alloc_pd,
.alloc_ucontext = rvt_alloc_ucontext,
@@ -387,7 +386,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
.dealloc_fmr = rvt_dealloc_fmr,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -399,7 +397,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.get_dma_mr = rvt_get_dma_mr,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
.map_phys_fmr = rvt_map_phys_fmr,
.mmap = rvt_mmap,
.modify_ah = rvt_modify_ah,
.modify_device = rvt_modify_device,
@@ -420,7 +417,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.reg_user_mr = rvt_reg_user_mr,
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
.unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
...