Commit 3ae0e7e8 authored by Mike Marciniszyn, committed by Greg Kroah-Hartman

IB/qib: Change lkey table allocation to support more MRs

commit d6f1c17e upstream.

The lkey table is allocated with __get_free_pages() with an order based
on the number of index bits from a module parameter.

The underlying kernel code cannot allocate that many contiguous pages.

There is no reason the underlying memory needs to be physically
contiguous.

This patch:
- switches the allocation/deallocation to vmalloc/vfree
- caps the number of bits to 23 to ensure at least 1 generation bit
  o this matches the module parameter description
Reviewed-by: Vinit Agnihotri <vinit.abhay.agnihotri@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 69d340a6
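To see why the old allocation is fragile, consider the arithmetic: the table holds one pointer per possible index, so its size is (1 << index_bits) * sizeof(pointer). The buddy allocator behind __get_free_pages() hands out physically contiguous blocks and, on common configurations, tops out around a few megabytes (roughly order 10 or 11 with 4 KiB pages), while a 23-bit table of 8-byte pointers needs 64 MiB. vmalloc() only needs virtually contiguous address space, so the table size stops being a problem. The userspace sketch below works through the numbers; the 4 KiB page size and 8-byte entry size are assumptions for illustration, not values read out of the driver.

/*
 * Back-of-the-envelope check (userspace sketch, not driver code):
 * how big the lkey table gets and what page order a physically
 * contiguous allocation would need.
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define ENTRY_SIZE 8			/* assume 8-byte table entries */

/* rough userspace stand-in for the kernel's get_order() */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned int bits;

	for (bits = 16; bits <= 23; bits++) {
		unsigned long lk_tab_size = (1UL << bits) * ENTRY_SIZE;

		printf("lkey bits %2u -> table %8lu KiB, order %u\n",
		       bits, lk_tab_size >> 10, order_for(lk_tab_size));
	}
	return 0;
}

Smaller settings fit comfortably (16 bits is 512 KiB, order 7); it is the larger settings permitted by the module parameter that push the order well past what the page allocator will satisfy.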
drivers/infiniband/hw/qib/qib_keys.c
@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 	 * unrestricted LKEY.
 	 */
 	rkt->gen++;
+	/*
+	 * bits are capped in qib_verbs.c to insure enough bits
+	 * for generation number
+	 */
 	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
 		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
 		 << 8);
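The expression above packs three fields into the 32-bit lkey: the top ib_qib_lkey_table_size bits carry the table index r, the next (24 - ib_qib_lkey_table_size) bits carry the generation tag, and the low 8 bits are left clear. If the module parameter were allowed to reach 24, the generation field would vanish entirely, which is what the new cap in qib_verbs.c prevents. A small standalone sketch of the packing follows; the index and generation values are made up, and "bits" stands in for ib_qib_lkey_table_size.

#include <stdio.h>

/* Mirror of the lkey packing expression in qib_alloc_lkey(). */
static unsigned int pack_lkey(unsigned int r, unsigned int gen,
			      unsigned int bits)
{
	return (r << (32 - bits)) |
	       ((((1u << (24 - bits)) - 1) & gen) << 8);
}

int main(void)
{
	/* 16 index bits leaves 24 - 16 = 8 generation bits */
	printf("bits=16: lkey = 0x%08x\n", pack_lkey(5, 3, 16));
	/* at the new 23-bit cap, 24 - 23 = 1 generation bit remains */
	printf("bits=23: lkey = 0x%08x\n", pack_lkey(5, 3, 23));
	return 0;
}

With the cap at 23 bits, 24 - 23 = 1 bit of generation always remains, which is the "at least 1 generation bit" wording in the commit message.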
drivers/infiniband/hw/qib/qib_verbs.c
@@ -40,6 +40,7 @@
 #include <linux/rculist.h>
 #include <linux/mm.h>
 #include <linux/random.h>
+#include <linux/vmalloc.h>
 
 #include "qib.h"
 #include "qib_common.h"
@@ -2109,10 +2110,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	 * the LKEY).  The remaining bits act as a generation number or tag.
 	 */
 	spin_lock_init(&dev->lk_table.lock);
+	/* insure generation is at least 4 bits see keys.c */
+	if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+		qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+			ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+		ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+	}
 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
 	dev->lk_table.table = (struct qib_mregion __rcu **)
-		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+		vmalloc(lk_tab_size);
 	if (dev->lk_table.table == NULL) {
 		ret = -ENOMEM;
 		goto err_lk;
@@ -2286,7 +2293,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 			sizeof(struct qib_pio_header),
 			dev->pio_hdrs, dev->pio_hdrs_phys);
 err_hdrs:
-	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+	vfree(dev->lk_table.table);
 err_lk:
 	kfree(dev->qp_table);
 err_qpt:
@@ -2340,8 +2347,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
 			sizeof(struct qib_pio_header),
 			dev->pio_hdrs, dev->pio_hdrs_phys);
 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
-	free_pages((unsigned long) dev->lk_table.table,
-		   get_order(lk_tab_size));
+	vfree(dev->lk_table.table);
 	kfree(dev->qp_table);
 }
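The teardown changes mirror the allocation change: memory obtained with vmalloc() must be returned with vfree(), not free_pages(), so the error path and qib_unregister_ib_device() switch together with the allocation site. Below is a minimal, hypothetical module sketch of the overall pattern (clamp the parameter, vmalloc() the table, vfree() it on exit); the names table_bits and table_demo_* are invented for illustration and are not part of the qib driver.

/*
 * Hypothetical sketch of the clamp + vmalloc/vfree pattern.
 * Not qib code; compiles as an ordinary out-of-tree module.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

static unsigned int table_bits = 16;
module_param(table_bits, uint, 0444);
MODULE_PARM_DESC(table_bits, "table size in bits (capped at 23)");

static void **table;

static int __init table_demo_init(void)
{
	size_t size;

	if (table_bits > 23)
		table_bits = 23;	/* keep at least one generation bit */

	size = (1UL << table_bits) * sizeof(*table);
	table = vmalloc(size);		/* virtually contiguous is enough */
	if (!table)
		return -ENOMEM;

	memset(table, 0, size);
	return 0;
}

static void __exit table_demo_exit(void)
{
	vfree(table);			/* vmalloc() memory goes back via vfree() */
}

module_init(table_demo_init);
module_exit(table_demo_exit);
MODULE_LICENSE("GPL");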
drivers/infiniband/hw/qib/qib_verbs.h
@@ -647,6 +647,8 @@ struct qib_qpn_table {
 	struct qpn_map map[QPNMAP_ENTRIES];
 };
 
+#define MAX_LKEY_TABLE_BITS 23
+
 struct qib_lkey_table {
 	spinlock_t lock; /* protect changes in this struct */
 	u32 next;               /* next unused index (speeds search) */