Commit 4a353399 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/umem: Add API to find best driver supported page size in an MR

This helper iterates through the SG list to find the best page size to use
from a bitmap of HW-supported page sizes. Drivers that support multiple
page sizes, but not mixed sizes within an MR, can use this API.
Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 4c4b1996
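
For illustration only (not part of this commit): a driver whose device supports, say, 4K, 2M, and 1G pages could call the new helper from its MR registration path roughly as below. The bitmap values, the error handling, and the page_shift field are assumptions for the sketch, not anything this patch introduces.

	/* Hypothetical caller: pick one HW page size for the whole MR. */
	unsigned long pg_sz;

	pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, virt);
	if (!pg_sz)
		return ERR_PTR(-EOPNOTSUPP);	/* no supported size fits this umem */
	mr->page_shift = __ffs(pg_sz);		/* assumed driver-private field */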
@@ -130,6 +130,57 @@ static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
	return sg;
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned int best_pg_bit;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	/* At minimum, drivers must support PAGE_SIZE or smaller */
	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
		return 0;

	va = virt;
	/* max page size not to exceed MR length */
	mask = roundup_pow_of_two(umem->length);
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		if (i && i != (umem->nmap - 1))
			/* restrict by length as well for interior SGEs */
			mask |= sg_dma_len(sg);
		va += sg_dma_len(sg) - pgoff;
		pgoff = 0;
	}

	best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);

	return BIT_ULL(best_pg_bit);
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
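
A worked example may help here (all values invented for illustration): suppose the device supports 4K and 2M pages, virt = 0x6000000, length = 4M, and the umem resolves to two 2M SGEs at DMA addresses 0x8a000000 and 0x9f200000. The mask then accumulates as:

	mask  = roundup_pow_of_two(0x400000);	/* = 0x400000, caps page size at 4M */
	mask |= 0x8a000000 ^ 0x6000000;		/* = 0x8c000000, bits 0-21 stay clear */
	mask |= 0x9f200000 ^ 0x6200000;		/* = 0x99000000, bits 0-23 stay clear */

The lowest set bit of the final mask is bit 22, meaning every VA/PA pair agrees in its low 22 bits, so rdma_find_pg_bit() selects 2M, the largest supported size not exceeding that alignment. Had the second SGE instead started at 0x9f201000, the XOR would set bit 12 and only 4K pages would remain usable.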

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
...

@@ -87,6 +87,9 @@ void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -104,6 +107,12 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
				    size_t length) {
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt) {
	/* 0 is the documented "no supported page size" result */
	return 0;
}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
@@ -3250,6 +3250,30 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
	return rdma_protocol_iwarp(dev, port_num);
}

/**
 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
 *
 * @addr: address
 * @pgsz_bitmap: bitmap of HW supported page sizes
 */
static inline unsigned int rdma_find_pg_bit(unsigned long addr,
					    unsigned long pgsz_bitmap)
{
	unsigned long align;
	unsigned long pgsz;

	/* Lowest set bit of addr gives its largest power-of-two alignment */
	align = addr & -addr;

	/* Find page bit such that addr is aligned to the highest supported
	 * HW page size
	 */
	pgsz = pgsz_bitmap & ~(-align << 1);
	if (!pgsz)
		return __ffs(pgsz_bitmap);

	return __fls(pgsz);
}
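
To illustrate the arithmetic (invented values, not from the patch): ~(-align << 1) is a mask of all bits at or below align, so pgsz keeps only the supported sizes that addr's alignment can satisfy, and __fls() picks the largest of them.

	unsigned long bitmap = SZ_4K | SZ_2M | SZ_1G;

	rdma_find_pg_bit(0x40000000, bitmap);	/* 1G-aligned -> returns 30 */
	rdma_find_pg_bit(0x00600000, bitmap);	/* 2M-aligned -> returns 21 */
	rdma_find_pg_bit(0x00003000, bitmap);	/* 4K-aligned -> returns 12 */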
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
...