Commit 2b8fb286 authored by Marcel Apfelbaum's avatar Marcel Apfelbaum Committed by David S. Miller

mlx4_core: mtts resources units changed to offset

In the previous implementation mtts are managed by:
1. order     - log(mtt segments), 'mtt segment' groups several mtts together.
2. first_seg - segment location relative to mtt table.
In the current implementation:
1. order     - log(mtts) rather than segments
2. offset    - mtt index in mtt table

Note: The actual mtt allocation is made in segments but it is
      transparent to callers.

Rationale: The mtt resource holders are not interested in how the allocation
           of mtts is done, but rather in how they will use them.
Signed-off-by: default avatarMarcel Apfelbaum <marcela@dev.mellanox.co.il>
Reviewed-by: default avatarJack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 5b4c4d36
...@@ -209,7 +209,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -209,7 +209,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.num_mpts; size = dev->caps.num_mpts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
size = dev->caps.num_mtt_segs * dev->caps.mtts_per_seg; size = dev->caps.num_mtts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
size = dev->caps.num_mgms + dev->caps.num_amgms; size = dev->caps.num_mgms + dev->caps.num_amgms;
......
...@@ -112,7 +112,7 @@ module_param_named(use_prio, use_prio, bool, 0444); ...@@ -112,7 +112,7 @@ module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)"); "(0/1, default 0)");
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
...@@ -222,9 +222,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -222,9 +222,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs;
dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
dev->caps.mtts_per_seg);
dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_mrws = dev_cap->reserved_mrws;
dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_uars = dev_cap->reserved_uars;
dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_pds = dev_cap->reserved_pds;
...@@ -232,7 +230,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -232,7 +230,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->reserved_xrcds : 0; dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0; dev_cap->max_xrcds : 0;
dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags; dev->caps.flags = dev_cap->flags;
...@@ -569,7 +568,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, ...@@ -569,7 +568,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base, init_hca->mtt_base,
dev->caps.mtt_entry_sz, dev->caps.mtt_entry_sz,
dev->caps.num_mtt_segs, dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0); dev->caps.reserved_mtts, 1, 0);
if (err) { if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
......
...@@ -191,6 +191,7 @@ do { \ ...@@ -191,6 +191,7 @@ do { \
dev_warn(&mdev->pdev->dev, format, ##arg) dev_warn(&mdev->pdev->dev, format, ##arg)
extern int mlx4_log_num_mgm_entry_size; extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) #define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
#define ALL_SLAVES 0xff #define ALL_SLAVES 0xff
...@@ -240,7 +241,7 @@ struct mlx4_mpt_entry { ...@@ -240,7 +241,7 @@ struct mlx4_mpt_entry {
__be32 win_cnt; __be32 win_cnt;
u8 reserved1[3]; u8 reserved1[3];
u8 mtt_rep; u8 mtt_rep;
__be64 mtt_seg; __be64 mtt_addr;
__be32 mtt_sz; __be32 mtt_sz;
__be32 entity_size; __be32 entity_size;
__be32 first_byte_offset; __be32 first_byte_offset;
......
...@@ -166,18 +166,24 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) ...@@ -166,18 +166,24 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{ {
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg; u32 seg;
int seg_order;
u32 offset;
seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); seg_order = max_t(int, order - log_mtts_per_seg, 0);
seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1) if (seg == -1)
return -1; return -1;
if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, offset = seg * (1 << log_mtts_per_seg);
seg + (1 << order) - 1)) {
mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
offset + (1 << order) - 1)) {
mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1; return -1;
} }
return seg; return offset;
} }
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
...@@ -212,45 +218,49 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, ...@@ -212,45 +218,49 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else } else
mtt->page_shift = page_shift; mtt->page_shift = page_shift;
for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order; ++mtt->order;
mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
if (mtt->first_seg == -1) if (mtt->offset == -1)
return -ENOMEM; return -ENOMEM;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(mlx4_mtt_init); EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
int order)
{ {
u32 first_seg;
int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order); seg_order = max_t(int, order - log_mtts_per_seg, 0);
first_seg = offset / (1 << log_mtts_per_seg);
mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg, mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
first_seg + (1 << order) - 1); first_seg + (1 << seg_order) - 1);
} }
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order) static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{ {
u64 in_param; u64 in_param;
int err; int err;
if (mlx4_is_mfunc(dev)) { if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, first_seg); set_param_l(&in_param, offset);
set_param_h(&in_param, order); set_param_h(&in_param, order);
err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
MLX4_CMD_FREE_RES, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED); MLX4_CMD_WRAPPED);
if (err) if (err)
mlx4_warn(dev, "Failed to free mtt range at:%d" mlx4_warn(dev, "Failed to free mtt range at:"
" order:%d\n", first_seg, order); "%d order:%d\n", offset, order);
return; return;
} }
__mlx4_free_mtt_range(dev, first_seg, order); __mlx4_free_mtt_range(dev, offset, order);
} }
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
...@@ -258,13 +268,13 @@ void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) ...@@ -258,13 +268,13 @@ void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
if (mtt->order < 0) if (mtt->order < 0)
return; return;
mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order); mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
} }
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{ {
return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; return (u64) mtt->offset * dev->caps.mtt_entry_sz;
} }
EXPORT_SYMBOL_GPL(mlx4_mtt_addr); EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
...@@ -504,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) ...@@ -504,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) { if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_seg = 0; mpt_entry->mtt_addr = 0;
} else { } else {
mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
&mr->mtt));
} }
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
...@@ -514,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) ...@@ -514,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE); MLX4_MPT_PD_FLAG_RAE);
mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
dev->caps.mtts_per_seg);
} else { } else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
} }
...@@ -548,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, ...@@ -548,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts; __be64 *mtts;
dma_addr_t dma_handle; dma_addr_t dma_handle;
int i; int i;
int s = start_index * sizeof (u64);
/* All MTTs must fit in the same page */
if (start_index / (PAGE_SIZE / sizeof (u64)) !=
(start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
return -EINVAL;
if (start_index & (dev->caps.mtts_per_seg - 1)) mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
return -EINVAL; start_index, &dma_handle);
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts) if (!mtts)
return -ENOMEM; return -ENOMEM;
...@@ -580,15 +582,25 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, ...@@ -580,15 +582,25 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
{ {
int err = 0; int err = 0;
int chunk; int chunk;
int mtts_per_page;
int max_mtts_first_page;
/* compute how may mtts fit in the first page */
mtts_per_page = PAGE_SIZE / sizeof(u64);
max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
% mtts_per_page;
chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) { while (npages > 0) {
chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err) if (err)
return err; return err;
npages -= chunk; npages -= chunk;
start_index += chunk; start_index += chunk;
page_list += chunk; page_list += chunk;
chunk = min_t(int, mtts_per_page, npages);
} }
return err; return err;
} }
...@@ -612,18 +624,9 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, ...@@ -612,18 +624,9 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
inbox = mailbox->buf; inbox = mailbox->buf;
while (npages > 0) { while (npages > 0) {
int s = mtt->first_seg * dev->caps.mtts_per_seg + chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
start_index; npages);
chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - inbox[0] = cpu_to_be64(mtt->offset + start_index);
dev->caps.mtts_per_seg, npages);
if (s / (PAGE_SIZE / sizeof(u64)) !=
(s + chunk - 1) / (PAGE_SIZE / sizeof(u64)))
chunk = PAGE_SIZE / sizeof(u64) -
(s % (PAGE_SIZE / sizeof(u64)));
inbox[0] = cpu_to_be64(mtt->first_seg *
dev->caps.mtts_per_seg +
start_index);
inbox[1] = 0; inbox[1] = 0;
for (i = 0; i < chunk; ++i) for (i = 0; i < chunk; ++i)
inbox[i + 2] = cpu_to_be64(page_list[i] | inbox[i + 2] = cpu_to_be64(page_list[i] |
...@@ -690,7 +693,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) ...@@ -690,7 +693,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
return err; return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy, err = mlx4_buddy_init(&mr_table->mtt_buddy,
ilog2(dev->caps.num_mtt_segs)); ilog2(dev->caps.num_mtts /
(1 << log_mtts_per_seg)));
if (err) if (err)
goto err_buddy; goto err_buddy;
...@@ -809,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, ...@@ -809,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr) int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{ {
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
u64 mtt_seg; u64 mtt_offset;
int err = -ENOMEM; int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
...@@ -829,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, ...@@ -829,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err) if (err)
return err; return err;
mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
fmr->mr.mtt.first_seg, fmr->mr.mtt.offset,
&fmr->dma_handle); &fmr->dma_handle);
if (!fmr->mtts) { if (!fmr->mtts) {
err = -ENOMEM; err = -ENOMEM;
goto err_free; goto err_free;
...@@ -872,7 +877,7 @@ static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, ...@@ -872,7 +877,7 @@ static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
return err; return err;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
fmr->mr.mtt.first_seg, fmr->mr.mtt.offset,
&fmr->dma_handle); &fmr->dma_handle);
if (!fmr->mtts) { if (!fmr->mtts) {
err = -ENOMEM; err = -ENOMEM;
......
...@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, ...@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp; profile[MLX4_RES_QP].num = request->num_qp;
...@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, ...@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start; init_hca->cmpt_base = profile[i].start;
break; break;
case MLX4_RES_MTT: case MLX4_RES_MTT:
dev->caps.num_mtt_segs = profile[i].num; dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start; priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start; init_hca->mtt_base = profile[i].start;
break; break;
......
...@@ -1550,9 +1550,9 @@ static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) ...@@ -1550,9 +1550,9 @@ static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
return (be32_to_cpu(mpt->flags) >> 9) & 1; return (be32_to_cpu(mpt->flags) >> 9) & 1;
} }
static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt) static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{ {
return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8; return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
} }
static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
...@@ -1565,12 +1565,12 @@ static int mr_get_pdn(struct mlx4_mpt_entry *mpt) ...@@ -1565,12 +1565,12 @@ static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->pd_flags) & 0xffffff; return be32_to_cpu(mpt->pd_flags) & 0xffffff;
} }
static int qp_get_mtt_seg(struct mlx4_qp_context *qpc) static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{ {
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
} }
static int srq_get_mtt_seg(struct mlx4_srq_context *srqc) static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{ {
return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
} }
...@@ -1614,8 +1614,8 @@ static int pdn2slave(int pdn) ...@@ -1614,8 +1614,8 @@ static int pdn2slave(int pdn)
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
int size, struct res_mtt *mtt) int size, struct res_mtt *mtt)
{ {
int res_start = mtt->com.res_id * dev->caps.mtts_per_seg; int res_start = mtt->com.res_id;
int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg; int res_size = (1 << mtt->order);
if (start < res_start || start + size > res_start + res_size) if (start < res_start || start + size > res_start + res_size)
return -EPERM; return -EPERM;
...@@ -1632,8 +1632,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1632,8 +1632,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int index = vhcr->in_modifier; int index = vhcr->in_modifier;
struct res_mtt *mtt; struct res_mtt *mtt;
struct res_mpt *mpt; struct res_mpt *mpt;
int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) * int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
int phys; int phys;
int id; int id;
...@@ -1644,8 +1643,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1644,8 +1643,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
phys = mr_phys_mpt(inbox->buf); phys = mr_phys_mpt(inbox->buf);
if (!phys) { if (!phys) {
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
RES_MTT, &mtt);
if (err) if (err)
goto ex_abort; goto ex_abort;
...@@ -1769,8 +1767,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1769,8 +1767,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct res_mtt *mtt; struct res_mtt *mtt;
struct res_qp *qp; struct res_qp *qp;
struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_qp_context *qpc = inbox->buf + 8;
int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) * int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
int mtt_size = qp_get_mtt_size(qpc); int mtt_size = qp_get_mtt_size(qpc);
struct res_cq *rcq; struct res_cq *rcq;
struct res_cq *scq; struct res_cq *scq;
...@@ -1786,8 +1783,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1786,8 +1783,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return err; return err;
qp->local_qpn = local_qpn; qp->local_qpn = local_qpn;
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
&mtt);
if (err) if (err)
goto ex_abort; goto ex_abort;
...@@ -1836,7 +1832,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1836,7 +1832,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
qp->srq = srq; qp->srq = srq;
} }
put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT); put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn); res_end_move(dev, slave, RES_QP, qpn);
return 0; return 0;
...@@ -1850,14 +1846,14 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1850,14 +1846,14 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
ex_put_rcq: ex_put_rcq:
put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt: ex_put_mtt:
put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT); put_res(dev, slave, mtt_base, RES_MTT);
ex_abort: ex_abort:
res_abort_move(dev, slave, RES_QP, qpn); res_abort_move(dev, slave, RES_QP, qpn);
return err; return err;
} }
static int eq_get_mtt_seg(struct mlx4_eq_context *eqc) static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{ {
return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
} }
...@@ -1873,7 +1869,7 @@ static int eq_get_mtt_size(struct mlx4_eq_context *eqc) ...@@ -1873,7 +1869,7 @@ static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
return 1 << (log_eq_size + 5 - page_shift); return 1 << (log_eq_size + 5 - page_shift);
} }
static int cq_get_mtt_seg(struct mlx4_cq_context *cqc) static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{ {
return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
} }
...@@ -1899,8 +1895,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1899,8 +1895,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
int eqn = vhcr->in_modifier; int eqn = vhcr->in_modifier;
int res_id = (slave << 8) | eqn; int res_id = (slave << 8) | eqn;
struct mlx4_eq_context *eqc = inbox->buf; struct mlx4_eq_context *eqc = inbox->buf;
int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) * int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
int mtt_size = eq_get_mtt_size(eqc); int mtt_size = eq_get_mtt_size(eqc);
struct res_eq *eq; struct res_eq *eq;
struct res_mtt *mtt; struct res_mtt *mtt;
...@@ -1912,8 +1907,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1912,8 +1907,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
if (err) if (err)
goto out_add; goto out_add;
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
&mtt);
if (err) if (err)
goto out_move; goto out_move;
...@@ -1986,7 +1980,8 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1986,7 +1980,8 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
/* Call the SW implementation of write_mtt: /* Call the SW implementation of write_mtt:
* - Prepare a dummy mtt struct * - Prepare a dummy mtt struct
* - Translate inbox contents to simple addresses in host endianess */ * - Translate inbox contents to simple addresses in host endianess */
mtt.first_seg = 0; mtt.offset = 0; /* TBD this is broken but I don't handle it since
we don't really use it */
mtt.order = 0; mtt.order = 0;
mtt.page_shift = 0; mtt.page_shift = 0;
for (i = 0; i < npages; ++i) for (i = 0; i < npages; ++i)
...@@ -2137,16 +2132,14 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, ...@@ -2137,16 +2132,14 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
int err; int err;
int cqn = vhcr->in_modifier; int cqn = vhcr->in_modifier;
struct mlx4_cq_context *cqc = inbox->buf; struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) * int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
struct res_cq *cq; struct res_cq *cq;
struct res_mtt *mtt; struct res_mtt *mtt;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
if (err) if (err)
return err; return err;
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
&mtt);
if (err) if (err)
goto out_move; goto out_move;
err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
...@@ -2228,8 +2221,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave, ...@@ -2228,8 +2221,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave,
struct res_mtt *orig_mtt; struct res_mtt *orig_mtt;
struct res_mtt *mtt; struct res_mtt *mtt;
struct mlx4_cq_context *cqc = inbox->buf; struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) * int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
if (err) if (err)
...@@ -2240,8 +2232,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave, ...@@ -2240,8 +2232,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave,
goto ex_put; goto ex_put;
} }
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
&mtt);
if (err) if (err)
goto ex_put; goto ex_put;
...@@ -2325,8 +2316,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, ...@@ -2325,8 +2316,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct res_mtt *mtt; struct res_mtt *mtt;
struct res_srq *srq; struct res_srq *srq;
struct mlx4_srq_context *srqc = inbox->buf; struct mlx4_srq_context *srqc = inbox->buf;
int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) * int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
dev->caps.mtts_per_seg;
if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
return -EINVAL; return -EINVAL;
...@@ -2334,8 +2324,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, ...@@ -2334,8 +2324,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
if (err) if (err)
return err; return err;
err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
RES_MTT, &mtt);
if (err) if (err)
goto ex_abort; goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
......
...@@ -272,8 +272,7 @@ struct mlx4_caps { ...@@ -272,8 +272,7 @@ struct mlx4_caps {
int num_comp_vectors; int num_comp_vectors;
int comp_pool; int comp_pool;
int num_mpts; int num_mpts;
int num_mtt_segs; int num_mtts;
int mtts_per_seg;
int fmr_reserved_mtts; int fmr_reserved_mtts;
int reserved_mtts; int reserved_mtts;
int reserved_mrws; int reserved_mrws;
...@@ -323,7 +322,7 @@ struct mlx4_buf { ...@@ -323,7 +322,7 @@ struct mlx4_buf {
}; };
struct mlx4_mtt { struct mlx4_mtt {
u32 first_seg; u32 offset;
int order; int order;
int page_shift; int page_shift;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment