Commit 5aad2145 authored by Nicholas Bellinger

Merge branch 'for-next' of...

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband into for-next
parents fa389e22 2dea9094
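The changes merged below convert every ib_umem consumer from walking a linked list of ib_umem_chunk page arrays to walking a single scatter-gather table (umem->sg_head) with for_each_sg(), so page totals come from umem->nmap / umem->npages instead of summing chunk->nents. As a rough illustration of the two iteration shapes only, here is a stand-alone sketch using simplified stand-in structures; toy_chunk, toy_sg_umem and their fields are invented for the example and are not the kernel definitions:

#include <stdio.h>

/* Simplified stand-ins for the two layouts; not the kernel structures. */
struct toy_chunk {
    struct toy_chunk *next;   /* models the list chained off umem->chunk_list */
    int nents;                /* pages held by this chunk */
};

struct toy_sg_umem {
    int nmap;                 /* models umem->nmap: mapped sg entries */
    int *sg_len;              /* models sg_dma_len() of each mapped entry */
};

/* Old shape: walk every chunk and add up its entries. */
static int count_pages_chunked(const struct toy_chunk *c)
{
    int n = 0;

    for (; c; c = c->next)
        n += c->nents;
    return n;
}

/* New shape: one flat table, one loop over nmap entries. */
static int count_pages_sg(const struct toy_sg_umem *umem, int page_size)
{
    int n = 0;

    for (int i = 0; i < umem->nmap; i++)
        n += umem->sg_len[i] / page_size;
    return n;
}

int main(void)
{
    struct toy_chunk c2 = { NULL, 3 };
    struct toy_chunk c1 = { &c2, 5 };
    int lens[] = { 5 * 4096, 3 * 4096 };
    struct toy_sg_umem umem = { 2, lens };

    printf("chunked: %d pages, sg: %d pages\n",
           count_pages_chunked(&c1), count_pages_sg(&umem, 4096));
    return 0;
}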
@@ -42,29 +42,29 @@
#include "uverbs.h"

-#define IB_UMEM_MAX_PAGE_CHUNK \
-   ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
-    ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
-     (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
-
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
-   struct ib_umem_chunk *chunk, *tmp;
+   struct scatterlist *sg;
+   struct page *page;
    int i;

-   list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-       ib_dma_unmap_sg(dev, chunk->page_list,
-               chunk->nents, DMA_BIDIRECTIONAL);
-       for (i = 0; i < chunk->nents; ++i) {
-           struct page *page = sg_page(&chunk->page_list[i]);
+   if (umem->nmap > 0)
+       ib_dma_unmap_sg(dev, umem->sg_head.sgl,
+               umem->nmap,
+               DMA_BIDIRECTIONAL);

-           if (umem->writable && dirty)
-               set_page_dirty_lock(page);
-           put_page(page);
-       }
+   for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {

-       kfree(chunk);
+       page = sg_page(sg);
+       if (umem->writable && dirty)
+           set_page_dirty_lock(page);
+       put_page(page);
    }
+
+   sg_free_table(&umem->sg_head);
+   return;
}

/**
@@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
    struct ib_umem *umem;
    struct page **page_list;
    struct vm_area_struct **vma_list;
-   struct ib_umem_chunk *chunk;
    unsigned long locked;
    unsigned long lock_limit;
    unsigned long cur_base;
    unsigned long npages;
    int ret;
-   int off;
    int i;
    DEFINE_DMA_ATTRS(attrs);
+   struct scatterlist *sg, *sg_list_start;
+   int need_release = 0;

    if (dmasync)
        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
    if (!can_do_mlock())
        return ERR_PTR(-EPERM);

-   umem = kmalloc(sizeof *umem, GFP_KERNEL);
+   umem = kzalloc(sizeof *umem, GFP_KERNEL);
    if (!umem)
        return ERR_PTR(-ENOMEM);
@@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
    /* We assume the memory is from hugetlb until proved otherwise */
    umem->hugetlb = 1;

-   INIT_LIST_HEAD(&umem->chunk_list);
-
    page_list = (struct page **) __get_free_page(GFP_KERNEL);
    if (!page_list) {
        kfree(umem);
@@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
    cur_base = addr & PAGE_MASK;

-   ret = 0;
+   if (npages == 0) {
+       ret = -EINVAL;
+       goto out;
+   }
+
+   ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
+   if (ret)
+       goto out;
+
+   need_release = 1;
+   sg_list_start = umem->sg_head.sgl;
+
    while (npages) {
        ret = get_user_pages(current, current->mm, cur_base,
                     min_t(unsigned long, npages,
@@ -157,54 +166,38 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (ret < 0)
            goto out;

+       umem->npages += ret;
        cur_base += ret * PAGE_SIZE;
        npages -= ret;

-       off = 0;
-
-       while (ret) {
-           chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
-                   min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
-                   GFP_KERNEL);
-           if (!chunk) {
-               ret = -ENOMEM;
-               goto out;
-           }
-
-           chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
-           sg_init_table(chunk->page_list, chunk->nents);
-           for (i = 0; i < chunk->nents; ++i) {
-               if (vma_list &&
-                   !is_vm_hugetlb_page(vma_list[i + off]))
-                   umem->hugetlb = 0;
-               sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
-           }
-
-           chunk->nmap = ib_dma_map_sg_attrs(context->device,
-                             &chunk->page_list[0],
-                             chunk->nents,
-                             DMA_BIDIRECTIONAL,
-                             &attrs);
-           if (chunk->nmap <= 0) {
-               for (i = 0; i < chunk->nents; ++i)
-                   put_page(sg_page(&chunk->page_list[i]));
-               kfree(chunk);
-
-               ret = -ENOMEM;
-               goto out;
-           }
-
-           ret -= chunk->nents;
-           off += chunk->nents;
-           list_add_tail(&chunk->list, &umem->chunk_list);
-       }
+       for_each_sg(sg_list_start, sg, ret, i) {
+           if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
+               umem->hugetlb = 0;

-       ret = 0;
+           sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
+       }
+
+       /* preparing for next loop */
+       sg_list_start = sg;
    }

+   umem->nmap = ib_dma_map_sg_attrs(context->device,
+                     umem->sg_head.sgl,
+                     umem->npages,
+                     DMA_BIDIRECTIONAL,
+                     &attrs);
+
+   if (umem->nmap <= 0) {
+       ret = -ENOMEM;
+       goto out;
+   }
+
+   ret = 0;
+
out:
    if (ret < 0) {
-       __ib_umem_release(context->device, umem, 0);
+       if (need_release)
+           __ib_umem_release(context->device, umem, 0);
        kfree(umem);
    } else
        current->mm->pinned_vm = locked;
@@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release);
int ib_umem_page_count(struct ib_umem *umem)
{
-   struct ib_umem_chunk *chunk;
    int shift;
    int i;
    int n;
+   struct scatterlist *sg;

    shift = ilog2(umem->page_size);

    n = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       for (i = 0; i < chunk->nmap; ++i)
-           n += sg_dma_len(&chunk->page_list[i]) >> shift;
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
+       n += sg_dma_len(sg) >> shift;

    return n;
}
...
@@ -1169,6 +1169,45 @@ int ib_dereg_mr(struct ib_mr *mr)
}
EXPORT_SYMBOL(ib_dereg_mr);
struct ib_mr *ib_create_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr)
{
struct ib_mr *mr;
if (!pd->device->create_mr)
return ERR_PTR(-ENOSYS);
mr = pd->device->create_mr(pd, mr_init_attr);
if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
atomic_set(&mr->usecnt, 0);
}
return mr;
}
EXPORT_SYMBOL(ib_create_mr);
int ib_destroy_mr(struct ib_mr *mr)
{
struct ib_pd *pd;
int ret;
if (atomic_read(&mr->usecnt))
return -EBUSY;
pd = mr->pd;
ret = mr->device->destroy_mr(mr);
if (!ret)
atomic_dec(&pd->usecnt);
return ret;
}
EXPORT_SYMBOL(ib_destroy_mr);
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
    struct ib_mr *mr;
@@ -1398,3 +1437,11 @@ int ib_destroy_flow(struct ib_flow *flow_id)
    return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
return mr->device->check_mr_status ?
mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);
@@ -431,9 +431,9 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 *pages;
    u64 kva = 0;
    int shift, n, len;
-   int i, j, k;
+   int i, k, entry;
    int err = 0;
-   struct ib_umem_chunk *chunk;
+   struct scatterlist *sg;
    struct c2_pd *c2pd = to_c2pd(pd);
    struct c2_mr *c2mr;
@@ -452,10 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    }

    shift = ffs(c2mr->umem->page_size) - 1;
-   n = 0;
-   list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
-       n += chunk->nents;
+   n = c2mr->umem->nmap;

    pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
    if (!pages) {
@@ -464,14 +461,12 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    }

    i = 0;
-   list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
-       for (j = 0; j < chunk->nmap; ++j) {
-           len = sg_dma_len(&chunk->page_list[j]) >> shift;
-           for (k = 0; k < len; ++k) {
-               pages[i++] =
-                   sg_dma_address(&chunk->page_list[j]) +
-                   (c2mr->umem->page_size * k);
-           }
+   for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
+       len = sg_dma_len(sg) >> shift;
+       for (k = 0; k < len; ++k) {
+           pages[i++] =
+               sg_dma_address(sg) +
+               (c2mr->umem->page_size * k);
        }
    }
...
@@ -618,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
    __be64 *pages;
    int shift, n, len;
-   int i, j, k;
+   int i, k, entry;
    int err = 0;
-   struct ib_umem_chunk *chunk;
    struct iwch_dev *rhp;
    struct iwch_pd *php;
    struct iwch_mr *mhp;
    struct iwch_reg_user_mr_resp uresp;
+   struct scatterlist *sg;

    PDBG("%s ib_pd %p\n", __func__, pd);
    php = to_iwch_pd(pd);
@@ -645,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    shift = ffs(mhp->umem->page_size) - 1;
-   n = 0;
-   list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-       n += chunk->nents;
+   n = mhp->umem->nmap;

    err = iwch_alloc_pbl(mhp, n);
    if (err)
@@ -661,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    i = n = 0;

-   list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; ++j) {
-           len = sg_dma_len(&chunk->page_list[j]) >> shift;
+   for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+           len = sg_dma_len(sg) >> shift;
            for (k = 0; k < len; ++k) {
-               pages[i++] = cpu_to_be64(sg_dma_address(
-                   &chunk->page_list[j]) +
+               pages[i++] = cpu_to_be64(sg_dma_address(sg) +
                    mhp->umem->page_size * k);
                if (i == PAGE_SIZE / sizeof *pages) {
                    err = iwch_write_pbl(mhp, pages, i, n);
@@ -676,7 +671,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                    i = 0;
                }
            }
-       }
+   }

    if (i)
        err = iwch_write_pbl(mhp, pages, i, n);
...
@@ -678,9 +678,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
    __be64 *pages;
    int shift, n, len;
-   int i, j, k;
+   int i, k, entry;
    int err = 0;
-   struct ib_umem_chunk *chunk;
+   struct scatterlist *sg;
    struct c4iw_dev *rhp;
    struct c4iw_pd *php;
    struct c4iw_mr *mhp;
@@ -710,10 +710,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    shift = ffs(mhp->umem->page_size) - 1;
-   n = 0;
-   list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-       n += chunk->nents;
+   n = mhp->umem->nmap;

    err = alloc_pbl(mhp, n);
    if (err)
        goto err;
@@ -726,24 +723,22 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    i = n = 0;

-   list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; ++j) {
-           len = sg_dma_len(&chunk->page_list[j]) >> shift;
-           for (k = 0; k < len; ++k) {
-               pages[i++] = cpu_to_be64(sg_dma_address(
-                   &chunk->page_list[j]) +
-                   mhp->umem->page_size * k);
-               if (i == PAGE_SIZE / sizeof *pages) {
-                   err = write_pbl(&mhp->rhp->rdev,
-                         pages,
-                         mhp->attr.pbl_addr + (n << 3), i);
-                   if (err)
-                       goto pbl_done;
-                   n += i;
-                   i = 0;
-               }
+   for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+       len = sg_dma_len(sg) >> shift;
+       for (k = 0; k < len; ++k) {
+           pages[i++] = cpu_to_be64(sg_dma_address(sg) +
+               mhp->umem->page_size * k);
+           if (i == PAGE_SIZE / sizeof *pages) {
+               err = write_pbl(&mhp->rhp->rdev,
+                     pages,
+                     mhp->attr.pbl_addr + (n << 3), i);
+               if (err)
+                   goto pbl_done;
+               n += i;
+               i = 0;
            }
        }
+   }

    if (i)
        err = write_pbl(&mhp->rhp->rdev, pages,
...
@@ -322,7 +322,7 @@ struct ehca_mr_pginfo {
    } phy;
    struct { /* type EHCA_MR_PGI_USER section */
        struct ib_umem *region;
-       struct ib_umem_chunk *next_chunk;
+       struct scatterlist *next_sg;
        u64 next_nmap;
    } usr;
    struct { /* type EHCA_MR_PGI_FMR section */
...
@@ -400,10 +400,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    pginfo.num_hwpages = num_hwpages;
    pginfo.u.usr.region = e_mr->umem;
    pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
-   pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
-                        (&e_mr->umem->chunk_list),
-                        list);
+   pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;

    ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
              e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
              &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
@@ -1858,61 +1855,39 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
                 u64 *kpage)
{
    int ret = 0;
-   struct ib_umem_chunk *prev_chunk;
-   struct ib_umem_chunk *chunk;
    u64 pgaddr;
-   u32 i = 0;
    u32 j = 0;
    int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
+   struct scatterlist **sg = &pginfo->u.usr.next_sg;

-   /* loop over desired chunk entries */
-   chunk      = pginfo->u.usr.next_chunk;
-   prev_chunk = pginfo->u.usr.next_chunk;
-   list_for_each_entry_continue(
-       chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
-       for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-           pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
-               << PAGE_SHIFT ;
-           *kpage = pgaddr + (pginfo->next_hwpage *
-                      pginfo->hwpage_size);
-           if ( !(*kpage) ) {
-               ehca_gen_err("pgaddr=%llx "
-                        "chunk->page_list[i]=%llx "
-                        "i=%x next_hwpage=%llx",
-                        pgaddr, (u64)sg_dma_address(
-                            &chunk->page_list[i]),
-                        i, pginfo->next_hwpage);
-               return -EFAULT;
-           }
-           (pginfo->hwpage_cnt)++;
-           (pginfo->next_hwpage)++;
-           kpage++;
-           if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
-               (pginfo->kpage_cnt)++;
-               (pginfo->u.usr.next_nmap)++;
-               pginfo->next_hwpage = 0;
-               i++;
-           }
-           j++;
-           if (j >= number) break;
+   while (*sg != NULL) {
+       pgaddr = page_to_pfn(sg_page(*sg))
+           << PAGE_SHIFT;
+       *kpage = pgaddr + (pginfo->next_hwpage *
+                  pginfo->hwpage_size);
+       if (!(*kpage)) {
+           ehca_gen_err("pgaddr=%llx "
+                    "sg_dma_address=%llx "
+                    "entry=%llx next_hwpage=%llx",
+                    pgaddr, (u64)sg_dma_address(*sg),
+                    pginfo->u.usr.next_nmap,
+                    pginfo->next_hwpage);
+           return -EFAULT;
        }
-       if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
-           (j >= number)) {
-           pginfo->u.usr.next_nmap = 0;
-           prev_chunk = chunk;
-           break;
-       } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
-           pginfo->u.usr.next_nmap = 0;
-           prev_chunk = chunk;
-       } else if (j >= number)
+       (pginfo->hwpage_cnt)++;
+       (pginfo->next_hwpage)++;
+       kpage++;
+       if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
+           (pginfo->kpage_cnt)++;
+           (pginfo->u.usr.next_nmap)++;
+           pginfo->next_hwpage = 0;
+           *sg = sg_next(*sg);
+       }
+       j++;
+       if (j >= number)
            break;
-       else
-           prev_chunk = chunk;
    }
-   pginfo->u.usr.next_chunk =
-       list_prepare_entry(prev_chunk,
-                  (&(pginfo->u.usr.region->chunk_list)),
-                  list);
+
    return ret;
}
@@ -1920,20 +1895,19 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
-static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
-                    int start_idx, int end_idx,
+static int ehca_check_kpages_per_ate(struct scatterlist **sg,
+                    int num_pages,
                     u64 *prev_pgaddr)
{
-   int t;
-   for (t = start_idx; t <= end_idx; t++) {
-       u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
+   for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
+       u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
        if (ehca_debug_level >= 3)
            ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
                     *(u64 *)__va(pgaddr));
        if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
            ehca_gen_err("uncontiguous page found pgaddr=%llx "
-                    "prev_pgaddr=%llx page_list_i=%x",
-                    pgaddr, *prev_pgaddr, t);
+                    "prev_pgaddr=%llx entries_left_in_hwpage=%x",
+                    pgaddr, *prev_pgaddr, num_pages);
            return -EINVAL;
        }
        *prev_pgaddr = pgaddr;
@@ -1947,111 +1921,80 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                 u64 *kpage)
{
    int ret = 0;
-   struct ib_umem_chunk *prev_chunk;
-   struct ib_umem_chunk *chunk;
    u64 pgaddr, prev_pgaddr;
-   u32 i = 0;
    u32 j = 0;
    int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
    int nr_kpages = kpages_per_hwpage;
+   struct scatterlist **sg = &pginfo->u.usr.next_sg;

-   /* loop over desired chunk entries */
-   chunk      = pginfo->u.usr.next_chunk;
-   prev_chunk = pginfo->u.usr.next_chunk;
-   list_for_each_entry_continue(
-       chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
-       for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-           if (nr_kpages == kpages_per_hwpage) {
-               pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
-                      << PAGE_SHIFT );
-               *kpage = pgaddr;
-               if ( !(*kpage) ) {
-                   ehca_gen_err("pgaddr=%llx i=%x",
-                            pgaddr, i);
-                   ret = -EFAULT;
-                   return ret;
-               }
-               /*
-                * The first page in a hwpage must be aligned;
-                * the first MR page is exempt from this rule.
-                */
-               if (pgaddr & (pginfo->hwpage_size - 1)) {
-                   if (pginfo->hwpage_cnt) {
-                       ehca_gen_err(
-                           "invalid alignment "
-                           "pgaddr=%llx i=%x "
-                           "mr_pgsize=%llx",
-                           pgaddr, i,
-                           pginfo->hwpage_size);
-                       ret = -EFAULT;
-                       return ret;
-                   }
-                   /* first MR page */
-                   pginfo->kpage_cnt =
-                       (pgaddr &
-                        (pginfo->hwpage_size - 1)) >>
-                       PAGE_SHIFT;
-                   nr_kpages -= pginfo->kpage_cnt;
-                   *kpage = pgaddr &
-                        ~(pginfo->hwpage_size - 1);
-               }
-               if (ehca_debug_level >= 3) {
-                   u64 val = *(u64 *)__va(pgaddr);
-                   ehca_gen_dbg("kpage=%llx chunk_page=%llx "
-                            "value=%016llx",
-                            *kpage, pgaddr, val);
-               }
-               prev_pgaddr = pgaddr;
-               i++;
-               pginfo->kpage_cnt++;
-               pginfo->u.usr.next_nmap++;
-               nr_kpages--;
-               if (!nr_kpages)
-                   goto next_kpage;
-               continue;
-           }
-           if (i + nr_kpages > chunk->nmap) {
-               ret = ehca_check_kpages_per_ate(
-                   chunk->page_list, i,
-                   chunk->nmap - 1, &prev_pgaddr);
-               if (ret) return ret;
-               pginfo->kpage_cnt += chunk->nmap - i;
-               pginfo->u.usr.next_nmap += chunk->nmap - i;
-               nr_kpages -= chunk->nmap - i;
-               break;
-           }
-
-           ret = ehca_check_kpages_per_ate(chunk->page_list, i,
-                           i + nr_kpages - 1,
-                           &prev_pgaddr);
-           if (ret) return ret;
-           i += nr_kpages;
-           pginfo->kpage_cnt += nr_kpages;
-           pginfo->u.usr.next_nmap += nr_kpages;
-next_kpage:
-           nr_kpages = kpages_per_hwpage;
-           (pginfo->hwpage_cnt)++;
-           kpage++;
-           j++;
-           if (j >= number) break;
-       }
-       if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
-           (j >= number)) {
-           pginfo->u.usr.next_nmap = 0;
-           prev_chunk = chunk;
-           break;
-       } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
-           pginfo->u.usr.next_nmap = 0;
-           prev_chunk = chunk;
-       } else if (j >= number)
-           break;
-       else
-           prev_chunk = chunk;
-   }
-   pginfo->u.usr.next_chunk =
-       list_prepare_entry(prev_chunk,
-                  (&(pginfo->u.usr.region->chunk_list)),
-                  list);
+   while (*sg != NULL) {
+       if (nr_kpages == kpages_per_hwpage) {
+           pgaddr = (page_to_pfn(sg_page(*sg))
+                  << PAGE_SHIFT);
+           *kpage = pgaddr;
+           if (!(*kpage)) {
+               ehca_gen_err("pgaddr=%llx entry=%llx",
+                        pgaddr, pginfo->u.usr.next_nmap);
+               ret = -EFAULT;
+               return ret;
+           }
+           /*
+            * The first page in a hwpage must be aligned;
+            * the first MR page is exempt from this rule.
+            */
+           if (pgaddr & (pginfo->hwpage_size - 1)) {
+               if (pginfo->hwpage_cnt) {
+                   ehca_gen_err(
+                       "invalid alignment "
+                       "pgaddr=%llx entry=%llx "
+                       "mr_pgsize=%llx",
+                       pgaddr, pginfo->u.usr.next_nmap,
+                       pginfo->hwpage_size);
+                   ret = -EFAULT;
+                   return ret;
+               }
+               /* first MR page */
+               pginfo->kpage_cnt =
+                   (pgaddr &
+                    (pginfo->hwpage_size - 1)) >>
+                   PAGE_SHIFT;
+               nr_kpages -= pginfo->kpage_cnt;
+               *kpage = pgaddr &
+                    ~(pginfo->hwpage_size - 1);
+           }
+           if (ehca_debug_level >= 3) {
+               u64 val = *(u64 *)__va(pgaddr);
+               ehca_gen_dbg("kpage=%llx page=%llx "
+                        "value=%016llx",
+                        *kpage, pgaddr, val);
+           }
+           prev_pgaddr = pgaddr;
+           *sg = sg_next(*sg);
+           pginfo->kpage_cnt++;
+           pginfo->u.usr.next_nmap++;
+           nr_kpages--;
+           if (!nr_kpages)
+               goto next_kpage;
+           continue;
+       }
+
+       ret = ehca_check_kpages_per_ate(sg, nr_kpages,
+                       &prev_pgaddr);
+       if (ret)
+           return ret;
+       pginfo->kpage_cnt += nr_kpages;
+       pginfo->u.usr.next_nmap += nr_kpages;
+
+next_kpage:
+       nr_kpages = kpages_per_hwpage;
+       (pginfo->hwpage_cnt)++;
+       kpage++;
+       j++;
+       if (j >= number)
+           break;
+   }
+
    return ret;
}
...
@@ -188,8 +188,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
    struct ipath_mr *mr;
    struct ib_umem *umem;
-   struct ib_umem_chunk *chunk;
-   int n, m, i;
+   int n, m, entry;
+   struct scatterlist *sg;
    struct ib_mr *ret;

    if (length == 0) {
@@ -202,10 +202,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    if (IS_ERR(umem))
        return (void *) umem;

-   n = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       n += chunk->nents;
+   n = umem->nmap;

    mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
    if (!mr) {
        ret = ERR_PTR(-ENOMEM);
@@ -224,22 +221,20 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    m = 0;
    n = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list) {
-       for (i = 0; i < chunk->nents; i++) {
-           void *vaddr;
-
-           vaddr = page_address(sg_page(&chunk->page_list[i]));
-           if (!vaddr) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-           }
-           mr->mr.map[m]->segs[n].vaddr = vaddr;
-           mr->mr.map[m]->segs[n].length = umem->page_size;
-           n++;
-           if (n == IPATH_SEGSZ) {
-               m++;
-               n = 0;
-           }
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       void *vaddr;
+
+       vaddr = page_address(sg_page(sg));
+       if (!vaddr) {
+           ret = ERR_PTR(-EINVAL);
+           goto bail;
+       }
+       mr->mr.map[m]->segs[n].vaddr = vaddr;
+       mr->mr.map[m]->segs[n].length = umem->page_size;
+       n++;
+       if (n == IPATH_SEGSZ) {
+           m++;
+           n = 0;
        }
    }
    ret = &mr->ibmr;
...
@@ -45,7 +45,6 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
            struct mlx4_db *db)
{
    struct mlx4_ib_user_db_page *page;
-   struct ib_umem_chunk *chunk;
    int err = 0;

    mutex_lock(&context->db_page_mutex);
@@ -73,8 +72,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
    list_add(&page->list, &context->db_page_list);

found:
-   chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
-   db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+   db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
    db->u.user_page = page;
    ++page->refcnt;
...
@@ -90,11 +90,11 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
               struct ib_umem *umem)
{
    u64 *pages;
-   struct ib_umem_chunk *chunk;
-   int i, j, k;
+   int i, k, entry;
    int n;
    int len;
    int err = 0;
+   struct scatterlist *sg;

    pages = (u64 *) __get_free_page(GFP_KERNEL);
    if (!pages)
@@ -102,26 +102,25 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
    i = n = 0;

-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; ++j) {
-           len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
-           for (k = 0; k < len; ++k) {
-               pages[i++] = sg_dma_address(&chunk->page_list[j]) +
-                   umem->page_size * k;
-               /*
-                * Be friendly to mlx4_write_mtt() and
-                * pass it chunks of appropriate size.
-                */
-               if (i == PAGE_SIZE / sizeof (u64)) {
-                   err = mlx4_write_mtt(dev->dev, mtt, n,
-                            i, pages);
-                   if (err)
-                       goto out;
-                   n += i;
-                   i = 0;
-               }
-           }
-       }
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       len = sg_dma_len(sg) >> mtt->page_shift;
+       for (k = 0; k < len; ++k) {
+           pages[i++] = sg_dma_address(sg) +
+               umem->page_size * k;
+           /*
+            * Be friendly to mlx4_write_mtt() and
+            * pass it chunks of appropriate size.
+            */
+           if (i == PAGE_SIZE / sizeof (u64)) {
+               err = mlx4_write_mtt(dev->dev, mtt, n,
+                        i, pages);
+               if (err)
+                   goto out;
+               n += i;
+               i = 0;
+           }
+       }
+   }

    if (i)
        err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
...
@@ -366,6 +366,38 @@ static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
    mlx5_buf_free(&dev->mdev, &buf->buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
struct ib_sig_err *item)
{
u16 syndrome = be16_to_cpu(cqe->syndrome);
#define GUARD_ERR (1 << 13)
#define APPTAG_ERR (1 << 12)
#define REFTAG_ERR (1 << 11)
if (syndrome & GUARD_ERR) {
item->err_type = IB_SIG_BAD_GUARD;
item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
} else
if (syndrome & REFTAG_ERR) {
item->err_type = IB_SIG_BAD_REFTAG;
item->expected = be32_to_cpu(cqe->expected_reftag);
item->actual = be32_to_cpu(cqe->actual_reftag);
} else
if (syndrome & APPTAG_ERR) {
item->err_type = IB_SIG_BAD_APPTAG;
item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
} else {
pr_err("Got signature completion error with bad syndrome %04x\n",
syndrome);
}
item->sig_err_offset = be64_to_cpu(cqe->err_offset);
item->key = be32_to_cpu(cqe->mkey);
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
             struct mlx5_ib_qp **cur_qp,
             struct ib_wc *wc)
@@ -375,6 +407,9 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
    struct mlx5_cqe64 *cqe64;
    struct mlx5_core_qp *mqp;
    struct mlx5_ib_wq *wq;
struct mlx5_sig_err_cqe *sig_err_cqe;
struct mlx5_core_mr *mmr;
struct mlx5_ib_mr *mr;
    uint8_t opcode;
    uint32_t qpn;
    u16 wqe_ctr;
@@ -475,6 +510,33 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
            }
        }
        break;
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
read_lock(&dev->mdev.priv.mr_table.lock);
mmr = __mlx5_mr_lookup(&dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
if (unlikely(!mmr)) {
read_unlock(&dev->mdev.priv.mr_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
}
mr = to_mibmr(mmr);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
mr->sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
cq->mcq.cqn, mr->sig->err_item.key,
mr->sig->err_item.err_type,
mr->sig->err_item.sig_err_offset,
mr->sig->err_item.expected,
mr->sig->err_item.actual);
read_unlock(&dev->mdev.priv.mr_table.lock);
goto repoll;
    }

    return 0;
...
@@ -47,7 +47,6 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
            struct mlx5_db *db)
{
    struct mlx5_ib_user_db_page *page;
-   struct ib_umem_chunk *chunk;
    int err = 0;

    mutex_lock(&context->db_page_mutex);
@@ -75,8 +74,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
    list_add(&page->list, &context->db_page_list);

found:
-   chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
-   db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+   db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
    db->u.user_page = page;
    ++page->refcnt;
...
@@ -273,6 +273,15 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
    if (flags & MLX5_DEV_CAP_FLAG_XRC)
        props->device_cap_flags |= IB_DEVICE_XRC;
    props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
/* At this stage no support for signature handover */
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
IB_PROT_T10DIF_TYPE_2 |
IB_PROT_T10DIF_TYPE_3;
props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
IB_GUARD_T10DIF_CSUM;
}
    props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
                    0xffffff;
@@ -1423,12 +1432,15 @@ static int init_one(struct pci_dev *pdev,
    dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
    dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
    dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr;
    dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
    dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
    dev->ib_dev.process_mad = mlx5_ib_process_mad;
dev->ib_dev.create_mr = mlx5_ib_create_mr;
    dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
    dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
    dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
    if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
        dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
...
@@ -44,16 +44,17 @@
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
            int *ncont, int *order)
{
-   struct ib_umem_chunk *chunk;
    unsigned long tmp;
    unsigned long m;
-   int i, j, k;
+   int i, k;
    u64 base = 0;
    int p = 0;
    int skip;
    int mask;
    u64 len;
    u64 pfn;
+   struct scatterlist *sg;
+   int entry;

    addr = addr >> PAGE_SHIFT;
    tmp = (unsigned long)addr;
@@ -61,32 +62,31 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
    skip = 1 << m;
    mask = skip - 1;
    i = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; j++) {
-           len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-           pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
-           for (k = 0; k < len; k++) {
-               if (!(i & mask)) {
-                   tmp = (unsigned long)pfn;
-                   m = min(m, find_first_bit(&tmp, sizeof(tmp)));
-                   skip = 1 << m;
-                   mask = skip - 1;
-                   base = pfn;
-                   p = 0;
-               } else {
-                   if (base + p != pfn) {
-                       tmp = (unsigned long)p;
-                       m = find_first_bit(&tmp, sizeof(tmp));
-                       skip = 1 << m;
-                       mask = skip - 1;
-                       base = pfn;
-                       p = 0;
-                   }
-               }
-               p++;
-               i++;
-           }
-       }
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       len = sg_dma_len(sg) >> PAGE_SHIFT;
+       pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+       for (k = 0; k < len; k++) {
+           if (!(i & mask)) {
+               tmp = (unsigned long)pfn;
+               m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+               skip = 1 << m;
+               mask = skip - 1;
+               base = pfn;
+               p = 0;
+           } else {
+               if (base + p != pfn) {
+                   tmp = (unsigned long)p;
+                   m = find_first_bit(&tmp, sizeof(tmp));
+                   skip = 1 << m;
+                   mask = skip - 1;
+                   base = pfn;
+                   p = 0;
+               }
+           }
+           p++;
+           i++;
+       }
+   }

    if (i) {
        m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
@@ -112,32 +112,32 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
{
    int shift = page_shift - PAGE_SHIFT;
    int mask = (1 << shift) - 1;
-   struct ib_umem_chunk *chunk;
-   int i, j, k;
+   int i, k;
    u64 cur = 0;
    u64 base;
    int len;
+   struct scatterlist *sg;
+   int entry;

    i = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; j++) {
-           len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-           base = sg_dma_address(&chunk->page_list[j]);
-           for (k = 0; k < len; k++) {
-               if (!(i & mask)) {
-                   cur = base + (k << PAGE_SHIFT);
-                   if (umr)
-                       cur |= 3;
-
-                   pas[i >> shift] = cpu_to_be64(cur);
-                   mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
-                           i >> shift, be64_to_cpu(pas[i >> shift]));
-               } else
-                   mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-                           base + (k << PAGE_SHIFT));
-               i++;
-           }
-       }
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       len = sg_dma_len(sg) >> PAGE_SHIFT;
+       base = sg_dma_address(sg);
+       for (k = 0; k < len; k++) {
+           if (!(i & mask)) {
+               cur = base + (k << PAGE_SHIFT);
+               if (umr)
+                   cur |= 3;
+
+               pas[i >> shift] = cpu_to_be64(cur);
+               mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
+                       i >> shift, be64_to_cpu(pas[i >> shift]));
+           } else
+               mlx5_ib_dbg(dev, "=====> 0x%llx\n",
+                       base + (k << PAGE_SHIFT));
+           i++;
+       }
+   }
}

int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
...
@@ -189,6 +189,9 @@ struct mlx5_ib_qp {
    int create_type;
    u32 pa_lkey;
/* Store signature errors */
bool signature_en;
};

struct mlx5_ib_cq_buf {
@@ -265,6 +268,7 @@ struct mlx5_ib_mr {
    enum ib_wc_status status;
    struct mlx5_ib_dev *dev;
    struct mlx5_create_mkey_mbox_out out;
struct mlx5_core_sig_ctx *sig;
};

struct mlx5_ib_fast_reg_page_list {
@@ -396,6 +400,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
    return container_of(mqp, struct mlx5_ib_qp, mqp);
}
static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
return container_of(mmr, struct mlx5_ib_mr, mmr);
}
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
    return container_of(ibpd, struct mlx5_ib_pd, ibpd);
@@ -495,6 +504,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                  u64 virt_addr, int access_flags,
                  struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                    int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -530,6 +542,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
static inline void init_query_mad(struct ib_smp *mad)
{
...
@@ -992,6 +992,122 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
    return 0;
}
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
int access_mode, err;
int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
in->seg.status = 1 << 6; /* free */
in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
access_mode = MLX5_ACCESS_MODE_MTT;
if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
u32 psv_index[2];
in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
MLX5_MKEY_BSF_EN);
in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
if (!mr->sig) {
err = -ENOMEM;
goto err_free_in;
}
/* create mem & wire PSVs */
err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
2, psv_index);
if (err)
goto err_free_sig;
access_mode = MLX5_ACCESS_MODE_KLM;
mr->sig->psv_memory.psv_idx = psv_index[0];
mr->sig->psv_wire.psv_idx = psv_index[1];
mr->sig->sig_status_checked = true;
mr->sig->sig_err_exists = false;
/* Next UMR, Arm SIGERR */
++mr->sig->sigerr_count;
}
in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
NULL, NULL, NULL);
if (err)
goto err_destroy_psv;
mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.rkey = mr->mmr.key;
mr->umem = NULL;
kfree(in);
return &mr->ibmr;
err_destroy_psv:
if (mr->sig) {
if (mlx5_core_destroy_psv(&dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
if (mlx5_core_destroy_psv(&dev->mdev,
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
}
err_free_sig:
kfree(mr->sig);
err_free_in:
kfree(in);
err_free:
kfree(mr);
return ERR_PTR(err);
}
int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int err;
if (mr->sig) {
if (mlx5_core_destroy_psv(&dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
if (mlx5_core_destroy_psv(&dev->mdev,
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
kfree(mr->sig);
}
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
return err;
}
kfree(mr);
return err;
}
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                    int max_page_list_len)
{
@@ -1077,3 +1193,44 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
    kfree(mfrpl->ibfrpl.page_list);
    kfree(mfrpl);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status)
{
struct mlx5_ib_mr *mmr = to_mmr(ibmr);
int ret = 0;
if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
pr_err("Invalid status check mask\n");
ret = -EINVAL;
goto done;
}
mr_status->fail_status = 0;
if (check_mask & IB_MR_CHECK_SIG_STATUS) {
if (!mmr->sig) {
ret = -EINVAL;
pr_err("signature status check requested on a non-signature enabled MR\n");
goto done;
}
mmr->sig->sig_status_checked = true;
if (!mmr->sig->sig_err_exists)
goto done;
if (ibmr->lkey == mmr->sig->err_item.key)
memcpy(&mr_status->sig_err, &mmr->sig->err_item,
sizeof(mr_status->sig_err));
else {
mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
mr_status->sig_err.sig_err_offset = 0;
mr_status->sig_err.key = mmr->sig->err_item.key;
}
mmr->sig->sig_err_exists = false;
mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
}
done:
return ret;
}
@@ -976,12 +976,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                       u64 virt, int acc, struct ib_udata *udata)
{
    struct mthca_dev *dev = to_mdev(pd->device);
-   struct ib_umem_chunk *chunk;
+   struct scatterlist *sg;
    struct mthca_mr *mr;
    struct mthca_reg_mr ucmd;
    u64 *pages;
    int shift, n, len;
-   int i, j, k;
+   int i, k, entry;
    int err = 0;
    int write_mtt_size;
@@ -1009,10 +1009,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    }

    shift = ffs(mr->umem->page_size) - 1;
-   n = 0;
-   list_for_each_entry(chunk, &mr->umem->chunk_list, list)
-       n += chunk->nents;
+   n = mr->umem->nmap;

    mr->mtt = mthca_alloc_mtt(dev, n);
    if (IS_ERR(mr->mtt)) {
@@ -1030,25 +1027,24 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

-   list_for_each_entry(chunk, &mr->umem->chunk_list, list)
-       for (j = 0; j < chunk->nmap; ++j) {
-           len = sg_dma_len(&chunk->page_list[j]) >> shift;
-           for (k = 0; k < len; ++k) {
-               pages[i++] = sg_dma_address(&chunk->page_list[j]) +
-                   mr->umem->page_size * k;
-               /*
-                * Be friendly to write_mtt and pass it chunks
-                * of appropriate size.
-                */
-               if (i == write_mtt_size) {
-                   err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
-                   if (err)
-                       goto mtt_done;
-                   n += i;
-                   i = 0;
-               }
-           }
-       }
+   for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+       len = sg_dma_len(sg) >> shift;
+       for (k = 0; k < len; ++k) {
+           pages[i++] = sg_dma_address(sg) +
+               mr->umem->page_size * k;
+           /*
+            * Be friendly to write_mtt and pass it chunks
+            * of appropriate size.
+            */
+           if (i == write_mtt_size) {
+               err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+               if (err)
+                   goto mtt_done;
+               n += i;
+               i = 0;
+           }
+       }
+   }

    if (i)
        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
...
@@ -726,10 +726,10 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                u32 num_pbes)
{
    struct ocrdma_pbe *pbe;
-   struct ib_umem_chunk *chunk;
+   struct scatterlist *sg;
    struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
    struct ib_umem *umem = mr->umem;
-   int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+   int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

    if (!mr->hwmr.num_pbes)
        return;
@@ -739,39 +739,37 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
    shift = ilog2(umem->page_size);

-   list_for_each_entry(chunk, &umem->chunk_list, list) {
-       /* get all the dma regions from the chunk. */
-       for (i = 0; i < chunk->nmap; i++) {
-           pages = sg_dma_len(&chunk->page_list[i]) >> shift;
-           for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-               /* store the page address in pbe */
-               pbe->pa_lo =
-                   cpu_to_le32(sg_dma_address
-                       (&chunk->page_list[i]) +
-                       (umem->page_size * pg_cnt));
-               pbe->pa_hi =
-                   cpu_to_le32(upper_32_bits
-                       ((sg_dma_address
-                         (&chunk->page_list[i]) +
-                         umem->page_size * pg_cnt)));
-               pbe_cnt += 1;
-               total_num_pbes += 1;
-               pbe++;
-
-               /* if done building pbes, issue the mbx cmd. */
-               if (total_num_pbes == num_pbes)
-                   return;
-
-               /* if the given pbl is full storing the pbes,
-                * move to next pbl.
-                */
-               if (pbe_cnt ==
-                   (mr->hwmr.pbl_size / sizeof(u64))) {
-                   pbl_tbl++;
-                   pbe = (struct ocrdma_pbe *)pbl_tbl->va;
-                   pbe_cnt = 0;
-               }
-           }
-       }
-   }
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       pages = sg_dma_len(sg) >> shift;
+       for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+           /* store the page address in pbe */
+           pbe->pa_lo =
+               cpu_to_le32(sg_dma_address
+                   (sg) +
+                   (umem->page_size * pg_cnt));
+           pbe->pa_hi =
+               cpu_to_le32(upper_32_bits
+                   ((sg_dma_address
+                     (sg) +
+                     umem->page_size * pg_cnt)));
+           pbe_cnt += 1;
+           total_num_pbes += 1;
+           pbe++;
+
+           /* if done building pbes, issue the mbx cmd. */
+           if (total_num_pbes == num_pbes)
+               return;
+
+           /* if the given pbl is full storing the pbes,
+            * move to next pbl.
+            */
+           if (pbe_cnt ==
+               (mr->hwmr.pbl_size / sizeof(u64))) {
+               pbl_tbl++;
+               pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+               pbe_cnt = 0;
+           }
+       }
+   }
}
...
@@ -232,8 +232,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
    struct qib_mr *mr;
    struct ib_umem *umem;
-   struct ib_umem_chunk *chunk;
-   int n, m, i;
+   struct scatterlist *sg;
+   int n, m, entry;
    struct ib_mr *ret;

    if (length == 0) {
@@ -246,9 +246,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    if (IS_ERR(umem))
        return (void *) umem;

-   n = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list)
-       n += chunk->nents;
+   n = umem->nmap;

    mr = alloc_mr(n, pd);
    if (IS_ERR(mr)) {
@@ -268,11 +266,10 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    mr->mr.page_shift = ilog2(umem->page_size);
    m = 0;
    n = 0;
-   list_for_each_entry(chunk, &umem->chunk_list, list) {
-       for (i = 0; i < chunk->nents; i++) {
+   for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
            void *vaddr;

-           vaddr = page_address(sg_page(&chunk->page_list[i]));
+           vaddr = page_address(sg_page(sg));
            if (!vaddr) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
@@ -284,7 +281,6 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
            m++;
            n = 0;
        }
-   }
    }

    ret = &mr->ibmr;
...
@@ -446,6 +446,7 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
    mlx5_init_cq_table(dev);
    mlx5_init_qp_table(dev);
    mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
    return 0;
...
@@ -36,11 +36,24 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
rwlock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
{
}
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
              struct mlx5_create_mkey_mbox_in *in, int inlen,
              mlx5_cmd_cbk_t callback, void *context,
              struct mlx5_create_mkey_mbox_out *out)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
    struct mlx5_create_mkey_mbox_out lout;
    int err;
    u8 key;
@@ -73,14 +86,21 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
    mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
              be32_to_cpu(lout.mkey), key, mr->key);
/* connect to MR tree */
write_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
write_unlock_irq(&table->lock);
    return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);

int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
    struct mlx5_destroy_mkey_mbox_in in;
    struct mlx5_destroy_mkey_mbox_out out;
unsigned long flags;
    int err;

    memset(&in, 0, sizeof(in));
@@ -95,6 +115,10 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
    if (out.hdr.status)
        return mlx5_cmd_status_to_err(&out.hdr);
write_lock_irqsave(&table->lock, flags);
radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
write_unlock_irqrestore(&table->lock, flags);
    return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
@@ -144,3 +168,64 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
    return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
struct mlx5_allocate_psv_in in;
struct mlx5_allocate_psv_out out;
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) {
mlx5_core_err(dev, "cmd exec failed %d\n", err);
return err;
}
if (out.hdr.status) {
mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
for (i = 0; i < npsvs; i++)
sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
return err;
}
EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
struct mlx5_destroy_psv_in in;
struct mlx5_destroy_psv_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.psv_number = cpu_to_be32(psv_num);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) {
mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
goto out;
}
if (out.hdr.status) {
mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
out:
return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
@@ -80,6 +80,7 @@ enum {
    MLX5_CQE_RESP_SEND_IMM = 3,
    MLX5_CQE_RESP_SEND_INV = 4,
    MLX5_CQE_RESIZE_CQ = 5,
MLX5_CQE_SIG_ERR = 12,
    MLX5_CQE_REQ_ERR = 13,
    MLX5_CQE_RESP_ERR = 14,
    MLX5_CQE_INVALID = 15,
...
@@ -48,6 +48,8 @@ enum {
    MLX5_MAX_COMMANDS = 32,
    MLX5_CMD_DATA_BLOCK_SIZE = 512,
    MLX5_PCI_CMD_XPORT = 7,
MLX5_MKEY_BSF_OCTO_SIZE = 4,
MLX5_MAX_PSVS = 4,
}; };
enum { enum {
...@@ -116,6 +118,7 @@ enum { ...@@ -116,6 +118,7 @@ enum {
MLX5_MKEY_MASK_START_ADDR = 1ull << 6, MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
MLX5_MKEY_MASK_PD = 1ull << 7, MLX5_MKEY_MASK_PD = 1ull << 7,
MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
MLX5_MKEY_MASK_BSF_EN = 1ull << 12, MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
MLX5_MKEY_MASK_KEY = 1ull << 13, MLX5_MKEY_MASK_KEY = 1ull << 13,
MLX5_MKEY_MASK_QPN = 1ull << 14, MLX5_MKEY_MASK_QPN = 1ull << 14,
...@@ -555,6 +558,23 @@ struct mlx5_cqe64 { ...@@ -555,6 +558,23 @@ struct mlx5_cqe64 {
u8 op_own; u8 op_own;
}; };
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
__be32 actual_trans_sig;
__be32 expected_reftag;
__be32 actual_reftag;
__be16 syndrome;
u8 rsvd22[2];
__be32 mkey;
__be64 err_offset;
u8 rsvd30[8];
__be32 qpn;
u8 rsvd38[2];
u8 signature;
u8 op_own;
};
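struct mlx5_sig_err_cqe carries both the expected and the actual signature values. A consumer could translate it into the generic struct ib_sig_err added to ib_verbs.h later in this patch; the sketch below covers only the reference-tag case and hard-codes the error type, since the real syndrome decoding is device-specific and lives in the mlx5_ib driver:

static void example_fill_sig_err(struct mlx5_sig_err_cqe *cqe,
				 struct ib_sig_err *item)
{
	/* the real driver derives err_type from cqe->syndrome */
	item->err_type       = IB_SIG_BAD_REFTAG;
	item->expected       = be32_to_cpu(cqe->expected_reftag);
	item->actual         = be32_to_cpu(cqe->actual_reftag);
	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key            = be32_to_cpu(cqe->mkey);
}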
struct mlx5_wqe_srq_next_seg {
u8 rsvd0[2];
__be16 next_wqe_index;
...@@ -936,4 +956,27 @@ enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
struct mlx5_allocate_psv_in {
struct mlx5_inbox_hdr hdr;
__be32 npsv_pd;
__be32 rsvd_psv0;
};
struct mlx5_allocate_psv_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
__be32 psv_idx[4];
};
struct mlx5_destroy_psv_in {
struct mlx5_inbox_hdr hdr;
__be32 psv_number;
u8 rsvd[4];
};
struct mlx5_destroy_psv_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
#endif /* MLX5_DEVICE_H */
...@@ -401,6 +401,26 @@ struct mlx5_eq {
struct mlx5_rsc_debug *dbg;
};
struct mlx5_core_psv {
u32 psv_idx;
struct psv_layout {
u32 pd;
u16 syndrome;
u16 reserved;
u16 bg;
u16 app_tag;
u32 ref_tag;
} psv;
};
struct mlx5_core_sig_ctx {
struct mlx5_core_psv psv_memory;
struct mlx5_core_psv psv_wire;
struct ib_sig_err err_item;
bool sig_status_checked;
bool sig_err_exists;
u32 sigerr_count;
};
struct mlx5_core_mr {
u64 iova;
...@@ -475,6 +495,13 @@ struct mlx5_srq_table {
struct radix_tree_root tree;
};
struct mlx5_mr_table {
/* protect radix tree
*/
rwlock_t lock;
struct radix_tree_root tree;
};
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
...@@ -504,6 +531,10 @@ struct mlx5_priv {
struct mlx5_cq_table cq_table;
/* end: cq staff */
/* start: mr staff */
struct mlx5_mr_table mr_table;
/* end: mr staff */
/* start: alloc staff */
struct mutex pgdir_mutex;
struct list_head pgdir_list;
...@@ -651,6 +682,11 @@ static inline void mlx5_vfree(const void *addr)
kfree(addr);
}
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
}
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
...@@ -685,6 +721,8 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
...@@ -746,6 +784,9 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
...
...@@ -37,6 +37,9 @@
#include <linux/mlx5/driver.h>
#define MLX5_INVALID_LKEY 0x100
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
...@@ -151,6 +154,11 @@ enum {
MLX5_SND_DBR = 1,
};
enum {
MLX5_FLAGS_INLINE = 1<<7,
MLX5_FLAGS_CHECK_FREE = 1<<5,
};
struct mlx5_wqe_fmr_seg {
__be32 flags;
__be32 mem_key;
...@@ -278,6 +286,60 @@ struct mlx5_wqe_inline_seg {
__be32 byte_count;
};
struct mlx5_bsf {
struct mlx5_bsf_basic {
u8 bsf_size_sbs;
u8 check_byte_mask;
union {
u8 copy_byte_mask;
u8 bs_selector;
u8 rsvd_wflags;
} wire;
union {
u8 bs_selector;
u8 rsvd_mflags;
} mem;
__be32 raw_data_size;
__be32 w_bfs_psv;
__be32 m_bfs_psv;
} basic;
struct mlx5_bsf_ext {
__be32 t_init_gen_pro_size;
__be32 rsvd_epi_size;
__be32 w_tfs_psv;
__be32 m_tfs_psv;
} ext;
struct mlx5_bsf_inl {
__be32 w_inl_vld;
__be32 w_rsvd;
__be64 w_block_format;
__be32 m_inl_vld;
__be32 m_rsvd;
__be64 m_block_format;
} inl;
};
struct mlx5_klm {
__be32 bcount;
__be32 key;
__be64 va;
};
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
__be32 key;
__be64 va;
};
struct mlx5_stride_block_ctrl_seg {
__be32 bcount_per_cycle;
__be32 op;
__be32 repeat_count;
u16 rsvd;
__be16 num_entries;
};
struct mlx5_core_qp {
void (*event) (struct mlx5_core_qp *, int);
int qpn;
...@@ -444,6 +506,11 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
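Together with mlx5_base_mkey() defined earlier, this lookup is how a completion handler can map an mkey reported by the hardware back to its mlx5_core_mr. A sketch of such a caller (example_lookup_mr is illustrative; readers take the mr_table rwlock shared):

static struct mlx5_core_mr *example_lookup_mr(struct mlx5_core_dev *dev, u32 mkey)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	struct mlx5_core_mr *mr;
	unsigned long flags;

	/* mask off the variant bits before looking up the base key */
	read_lock_irqsave(&table->lock, flags);
	mr = __mlx5_mr_lookup(dev, mlx5_base_mkey(mkey));
	read_unlock_irqrestore(&table->lock, flags);

	return mr;
}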
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
...
...@@ -46,17 +46,12 @@ struct ib_umem {
int page_size;
int writable;
int hugetlb;
struct list_head chunk_list;
struct work_struct work;
struct mm_struct *mm;
unsigned long diff;
struct sg_table sg_head;
int nmap;
int npages;
};
struct ib_umem_chunk {
struct list_head list;
int nents;
int nmap;
struct scatterlist page_list[0];
};
#ifdef CONFIG_INFINIBAND_USER_MEM
...
...@@ -122,7 +122,19 @@ enum ib_device_cap_flags {
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
IB_DEVICE_SIGNATURE_HANDOVER = (1<<30)
};
enum ib_signature_prot_cap {
IB_PROT_T10DIF_TYPE_1 = 1,
IB_PROT_T10DIF_TYPE_2 = 1 << 1,
IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};
enum ib_signature_guard_cap {
IB_GUARD_T10DIF_CRC = 1,
IB_GUARD_T10DIF_CSUM = 1 << 1,
};
enum ib_atomic_cap {
...@@ -172,6 +184,8 @@ struct ib_device_attr {
unsigned int max_fast_reg_page_list_len;
u16 max_pkeys;
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
};
enum ib_mtu {
...@@ -461,6 +475,130 @@ int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
*/
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
enum ib_mr_create_flags {
IB_MR_SIGNATURE_EN = 1,
};
/**
* ib_mr_init_attr - Memory region init attributes passed to routine
* ib_create_mr.
* @max_reg_descriptors: max number of registration descriptors that
* may be used with registration work requests.
* @flags: MR creation flags bit mask.
*/
struct ib_mr_init_attr {
int max_reg_descriptors;
u32 flags;
};
enum ib_signature_type {
IB_SIG_TYPE_T10_DIF,
};
/**
* T10-DIF Signature types
* T10-DIF types are defined by SCSI
* specifications.
*/
enum ib_t10_dif_type {
IB_T10DIF_NONE,
IB_T10DIF_TYPE1,
IB_T10DIF_TYPE2,
IB_T10DIF_TYPE3
};
/**
* Signature T10-DIF block-guard types
* IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
* IB_T10DIF_CSUM: Corresponds to IP checksum rules.
*/
enum ib_t10_dif_bg_type {
IB_T10DIF_CRC,
IB_T10DIF_CSUM
};
/**
* struct ib_t10_dif_domain - Parameters specific for T10-DIF
* domain.
* @type: T10-DIF type (0|1|2|3)
* @bg_type: T10-DIF block guard type (CRC|CSUM)
* @pi_interval: protection information interval.
* @bg: seed of guard computation.
* @app_tag: application tag of guard block
* @ref_tag: initial guard block reference tag.
* @type3_inc_reftag: T10-DIF Type 3 says nothing about
* the reference tag, so it is the user's
* choice whether to increment it.
*/
struct ib_t10_dif_domain {
enum ib_t10_dif_type type;
enum ib_t10_dif_bg_type bg_type;
u16 pi_interval;
u16 bg;
u16 app_tag;
u32 ref_tag;
bool type3_inc_reftag;
};
/**
* struct ib_sig_domain - Parameters for signature domain
* @sig_type: specific signature type
* @sig: union of all signature domain attributes that may
* be used to set domain layout.
*/
struct ib_sig_domain {
enum ib_signature_type sig_type;
union {
struct ib_t10_dif_domain dif;
} sig;
};
/**
* struct ib_sig_attrs - Parameters for signature handover operation
* @check_mask: bitmask for signature byte check (8 bytes)
* @mem: memory domain layout descriptor.
* @wire: wire domain layout descriptor.
*/
struct ib_sig_attrs {
u8 check_mask;
struct ib_sig_domain mem;
struct ib_sig_domain wire;
};
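As an illustration of how the two domains compose, a ULP that keeps unprotected data in memory but wants T10-DIF Type 1 with a CRC guard on the wire might fill the attributes roughly as follows (the 512-byte protection interval and the helper name are examples, not part of the API):

static void example_set_sig_attrs(struct ib_sig_attrs *sig_attrs, u32 lba)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	/* memory domain: no protection information */
	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;

	/* wire domain: T10-DIF Type 1, CRC guard, 512-byte intervals */
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig.dif.type = IB_T10DIF_TYPE1;
	sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs->wire.sig.dif.pi_interval = 512;
	sig_attrs->wire.sig.dif.ref_tag = lba;	/* Type 1 seeds the ref tag with the LBA */

	sig_attrs->check_mask = 0xff;		/* check all 8 DIF bytes */
}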
enum ib_sig_err_type {
IB_SIG_BAD_GUARD,
IB_SIG_BAD_REFTAG,
IB_SIG_BAD_APPTAG,
};
/**
* struct ib_sig_err - signature error descriptor
*/
struct ib_sig_err {
enum ib_sig_err_type err_type;
u32 expected;
u32 actual;
u64 sig_err_offset;
u32 key;
};
enum ib_mr_status_check {
IB_MR_CHECK_SIG_STATUS = 1,
};
/**
* struct ib_mr_status - Memory region status container
*
* @fail_status: Bitmask of MR checks status. For each
* failed check a corresponding status bit is set.
* @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
* failure.
*/
struct ib_mr_status {
u32 fail_status;
struct ib_sig_err sig_err;
};
/**
* mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
* enum.
...@@ -644,6 +782,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
...@@ -808,6 +947,7 @@ enum ib_wr_opcode {
IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
IB_WR_BIND_MW,
IB_WR_REG_SIG_MR,
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer.
*/
...@@ -913,6 +1053,12 @@ struct ib_send_wr {
u32 rkey;
struct ib_mw_bind_info bind_info;
} bind_mw;
struct {
struct ib_sig_attrs *sig_attrs;
struct ib_mr *sig_mr;
int access_flags;
struct ib_sge *prot;
} sig_handover;
} wr;
u32 xrc_remote_srq_num; /* XRC TGT QPs only */
};
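The new sig_handover member is used with the IB_WR_REG_SIG_MR opcode added above. A sketch of posting such a work request follows (the helper name and single-SGE layout are assumptions; sig_mr must be a signature-enabled MR and sig_attrs filled in as in the earlier example):

static int example_post_reg_sig_mr(struct ib_qp *qp, struct ib_mr *sig_mr,
				   struct ib_sge *data_sge, struct ib_sge *prot_sge,
				   struct ib_sig_attrs *sig_attrs)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_REG_SIG_MR;
	wr.sg_list = data_sge;			/* data buffer(s) */
	wr.num_sge = 1;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.sig_handover.sig_attrs = sig_attrs;
	wr.wr.sig_handover.sig_mr = sig_mr;
	wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	wr.wr.sig_handover.prot = prot_sge;	/* may be NULL when no PI buffer exists in memory */

	return ib_post_send(qp, &wr, &bad_wr);
}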
...@@ -1407,6 +1553,9 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr);
int (*destroy_mr)(struct ib_mr *mr);
struct ib_mr * (*create_mr)(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr);
struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
...@@ -1455,6 +1604,8 @@ struct ib_device {
*flow_attr,
int domain);
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_dma_mapping_ops *dma_ops;
...@@ -2250,6 +2401,25 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
*/
int ib_dereg_mr(struct ib_mr *mr);
/**
* ib_create_mr - Allocates a memory region that may be used for
* signature handover operations.
* @pd: The protection domain associated with the region.
* @mr_init_attr: memory region init attributes.
*/
struct ib_mr *ib_create_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr);
/**
* ib_destroy_mr - Destroys a memory region that was created using
* ib_create_mr and removes it from HW translation tables.
* @mr: The memory region to destroy.
*
* This function can fail, if the memory region has memory windows bound to it.
*/
int ib_destroy_mr(struct ib_mr *mr);
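Taken together, a caller would allocate the signature MR once and release it when no work requests reference it anymore; a minimal sketch (the value 2 for max_reg_descriptors, one data plus one protection registration, is an example choice):

static struct ib_mr *example_alloc_sig_mr(struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_init_attr = {
		.max_reg_descriptors = 2,	/* e.g. data + protection */
		.flags = IB_MR_SIGNATURE_EN,
	};

	return ib_create_mr(pd, &mr_init_attr);
}

/* and later, once the MR is idle: ib_destroy_mr(mr); */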
/**
* ib_alloc_fast_reg_mr - Allocates memory region usable with the
* IB_WR_FAST_REG_MR send work request.
...@@ -2435,4 +2605,19 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected
* ib_mr; the first use is for signature status checks.
*
* @mr: A memory region.
* @check_mask: Bitmask of which checks to perform from
* ib_mr_status_check enumeration.
* @mr_status: The container of relevant status checks.
* Failed checks are indicated in the status bitmask
* and the relevant info is returned in the error item.
*/
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
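A typical use, once the transfer that relied on the signature MR has completed, is sketched below (the helper name and the printout are illustrative):

static int example_check_sig_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error %d: expected 0x%x actual 0x%x at offset %llu\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual,
		       (unsigned long long)mr_status.sig_err.sig_err_offset);

	return 0;
}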
#endif /* IB_VERBS_H */