Commit cb463364 authored by Mike Marciniszyn, committed by Ben Hutchings

IB/qib: Change lkey table allocation to support more MRs

commit d6f1c17e upstream.

The lkey table is allocated with __get_free_pages() with an order based
on the number of index bits from a module parameter.

For large index sizes, the kernel page allocator cannot provide that many
physically contiguous pages.

There is no reason the underlying memory needs to be physically
contiguous.

This patch:
- switches the allocation/deallocation to vmalloc/vfree
- caps the number of bits to 23 to ensure at least 1 generation bit
  o this matches the module parameter description
Reviewed-by: Vinit Agnihotri <vinit.abhay.agnihotri@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
[bwh: Backported to 3.2:
 - Adjust context
 - Add definition of qib_dev_warn(), added upstream by commit ddb88765
   ("IB/qib: Convert opcode counters to per-context")]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent ff8c37e6
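
In short, the patch swaps a high-order page allocation for a virtually contiguous one. The sketch below illustrates the before/after allocation using the field names from the diff that follows; the wrapper function is hypothetical and its error handling is simplified, so treat it as an outline rather than the driver's actual code.

#include <linux/vmalloc.h>

/* Hypothetical helper, for illustration only: lk_tab_size is
 * dev->lk_table.max * sizeof(*dev->lk_table.table), as in the diff below.
 */
static int qib_lk_table_alloc_sketch(struct qib_ibdev *dev, size_t lk_tab_size)
{
        /*
         * Before: required get_order(lk_tab_size) physically contiguous
         * pages, which large index sizes cannot satisfy:
         *
         *      dev->lk_table.table = (struct qib_mregion **)
         *              __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
         */

        /* After: the table only needs to be virtually contiguous. */
        dev->lk_table.table = vmalloc(lk_tab_size);
        if (dev->lk_table.table == NULL)
                return -ENOMEM;
        return 0;
}

The teardown paths change symmetrically: free_pages()/get_order() become vfree(), as the err_hdrs and qib_unregister_ib_device() hunks show.
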
@@ -1421,6 +1421,10 @@ extern struct mutex qib_mutex;
                         qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
         } while (0)
 
+#define qib_dev_warn(dd, fmt, ...) \
+        dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+                qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+
 #define qib_dev_porterr(dd, port, fmt, ...) \
         do { \
                 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
@@ -69,6 +69,10 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
          * unrestricted LKEY.
          */
         rkt->gen++;
+        /*
+         * bits are capped in qib_verbs.c to insure enough bits
+         * for generation number
+         */
         mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
                 ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
                  << 8);
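
To make the capping comment above concrete: the index r occupies the top ib_qib_lkey_table_size bits of the lkey, the generation tag occupies the (24 - ib_qib_lkey_table_size) bits starting at bit 8, and the low 8 bits are left for the user-owned part of the key. A small stand-alone sketch of the arithmetic (illustrative user-space code with arbitrary sample values, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int bits = 23;  /* MAX_LKEY_TABLE_BITS, the capped maximum */
        uint32_t r = 5;          /* sample table index */
        uint32_t gen = 3;        /* sample generation counter */

        /* Same layout as qib_alloc_lkey(): index | (masked generation << 8) */
        uint32_t lkey = (r << (32 - bits)) |
                        ((((1u << (24 - bits)) - 1) & gen) << 8);

        /* With bits == 23 the generation field is 24 - 23 = 1 bit wide, so
         * capping at 23 always leaves at least one generation bit, as the
         * commit message notes. */
        printf("lkey = 0x%08x\n", lkey); /* prints lkey = 0x00000b00 */
        return 0;
}
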
@@ -40,6 +40,7 @@
 #include <linux/rculist.h>
 #include <linux/mm.h>
 #include <linux/random.h>
+#include <linux/vmalloc.h>
 
 #include "qib.h"
 #include "qib_common.h"
@@ -2035,10 +2036,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
          * the LKEY). The remaining bits act as a generation number or tag.
          */
         spin_lock_init(&dev->lk_table.lock);
+        /* insure generation is at least 4 bits see keys.c */
+        if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+                qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+                        ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+                ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+        }
         dev->lk_table.max = 1 << ib_qib_lkey_table_size;
         lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
         dev->lk_table.table = (struct qib_mregion **)
-                __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+                vmalloc(lk_tab_size);
         if (dev->lk_table.table == NULL) {
                 ret = -ENOMEM;
                 goto err_lk;
@@ -2208,7 +2215,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
                                 sizeof(struct qib_pio_header),
                           dev->pio_hdrs, dev->pio_hdrs_phys);
 err_hdrs:
-        free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+        vfree(dev->lk_table.table);
 err_lk:
         kfree(dev->qp_table);
 err_qpt:
@@ -2262,7 +2269,6 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
                                         sizeof(struct qib_pio_header),
                                   dev->pio_hdrs, dev->pio_hdrs_phys);
         lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
-        free_pages((unsigned long) dev->lk_table.table,
-                   get_order(lk_tab_size));
+        vfree(dev->lk_table.table);
         kfree(dev->qp_table);
 }
@@ -622,6 +622,8 @@ struct qib_qpn_table {
         struct qpn_map map[QPNMAP_ENTRIES];
 };
 
+#define MAX_LKEY_TABLE_BITS 23
+
 struct qib_lkey_table {
         spinlock_t lock; /* protect changes in this struct */
         u32 next;               /* next unused index (speeds search) */