Commit 8d3ef29d authored by Ishai Rabinovitz, committed by Roland Dreier

IB/mthca: Use an enum for HCA page size

Use a named enum for the HCA's internal page size, rather than having
magic values of 4096 and shifts by 12 all over the code.  Also, fix
one minor bug in EQ handling: only one HCA page is mapped to the HCA
during initialization, but a full kernel page is unmapped during
cleanup.  This might cause problems when PAGE_SIZE != 4096.
Signed-off-by: Ishai Rabinovitz <ishai@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 67e73776
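
For context on the patch below: the HCA always manages its internal (ICM) memory in 4 KB pages, while the kernel's PAGE_SIZE may be larger (e.g. 64 KB on some PowerPC configurations). Here is a minimal standalone sketch of the conversion the patch makes explicit; hca_pages_to_sys_pages() is a hypothetical name used only for this illustration, not a function from the driver:

/*
 * Standalone illustration (not part of the commit): convert a count
 * of 4 KB HCA pages into the number of kernel pages covering them.
 */
#include <assert.h>

#define MTHCA_ICM_PAGE_SHIFT	12	/* HCA page is always 4 KB */

static unsigned long hca_pages_to_sys_pages(unsigned long hca_pages,
					    unsigned int page_shift)
{
	unsigned long per_sys = 1UL << (page_shift - MTHCA_ICM_PAGE_SHIFT);

	/* Round up: a partially used kernel page still must be mapped. */
	return (hca_pages + per_sys - 1) / per_sys;
}

int main(void)
{
	assert(hca_pages_to_sys_pages(5, 12) == 5);	/* 4 KB kernel pages */
	assert(hca_pages_to_sys_pages(5, 16) == 1);	/* 64 KB kernel pages */
	return 0;
}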
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -652,8 +652,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 			 * address or size and use that as our log2 size.
 			 */
 			lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
-			if (lg < 12) {
-				mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n",
+			if (lg < MTHCA_ICM_PAGE_SHIFT) {
+				mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+					   MTHCA_ICM_PAGE_SIZE,
 					   (unsigned long long) mthca_icm_addr(&iter),
 					   mthca_icm_size(&iter));
 				err = -EINVAL;
@@ -665,8 +666,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 				virt += 1 << lg;
 			}

-			pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
-							   (i << lg)) | (lg - 12));
+			pages[nent * 2 + 1] =
+				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
+					    (lg - MTHCA_ICM_PAGE_SHIFT));
 			ts += 1 << (lg - 10);
 			++tc;
@@ -822,12 +824,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);

 		/*
-		 * Arbel page size is always 4 KB; round up number of
-		 * system pages needed.
+		 * Round up number of system pages needed in case
+		 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
 		 */
 		dev->fw.arbel.fw_pages =
-			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
-				(PAGE_SHIFT - 12);
+			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+				(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

 		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
 			  (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1540,11 +1542,11 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
 		return ret;

 	/*
-	 * Arbel page size is always 4 KB; round up number of system
-	 * pages needed.
+	 * Round up number of system pages needed in case
+	 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
 	 */
-	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
-	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
+	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+			(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

 	return 0;
 }
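
A worked example of the rounding above, assuming a hypothetical 16 KB kernel page (PAGE_SHIFT = 14): PAGE_SIZE / MTHCA_ICM_PAGE_SIZE is 4, so 5 aux pages become ALIGN(5, 4) >> 2 = 8 >> 2 = 2 system pages. The snippet below checks this with ALIGN() expanded by hand (in the kernel it comes from <linux/kernel.h>):

#include <assert.h>

#define MTHCA_ICM_PAGE_SHIFT	12
#define MTHCA_ICM_PAGE_SIZE	(1 << MTHCA_ICM_PAGE_SHIFT)
#define PAGE_SHIFT		14			/* hypothetical 16 KB pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long aux_pages = 5;	/* HCA pages requested by firmware */

	aux_pages = ALIGN(aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
			(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
	assert(aux_pages == 2);	/* five 4 KB pages fit in two 16 KB pages */
	return 0;
}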
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -825,7 +825,7 @@ void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
 	u8 status;

-	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
+	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
 	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
 		       PCI_DMA_BIDIRECTIONAL);
 	__free_page(dev->eq_table.icm_page);
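
This hunk is the bug fix called out in the commit message: initialization maps exactly one HCA page for the EQ ICM, so cleanup must unmap exactly one. The old PAGE_SIZE / 4096 equals 1 only when PAGE_SIZE is 4096; with 64 KB kernel pages it would tell the HCA to unmap 16 pages, 15 of which were never mapped.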
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -202,7 +202,8 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)

 	if (--table->icm[i]->refcount == 0) {
 		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-				MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+				&status);
 		mthca_free_icm(dev, table->icm[i]);
 		table->icm[i] = NULL;
 	}
@@ -336,7 +337,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,

 	for (i = 0; i < num_icm; ++i)
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+					&status);
 			mthca_free_icm(dev, table->icm[i]);
 		}
@@ -353,7 +355,8 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)

 	for (i = 0; i < table->num_icm; ++i)
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+					&status);
 			mthca_free_icm(dev, table->icm[i]);
 		}
@@ -364,7 +367,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
 {
 	return dev->uar_table.uarc_base +
 		uar->index * dev->uar_table.uarc_size +
-		page * 4096;
+		page * MTHCA_ICM_PAGE_SIZE;
 }

 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
@@ -401,7 +404,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	if (ret < 0)
 		goto out;

-	db_tab->page[i].mem.length = 4096;
+	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
 	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -455,7 +458,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
 	if (!mthca_is_memfree(dev))
 		return NULL;

-	npages = dev->uar_table.uarc_size / 4096;
+	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
 	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
 	if (!db_tab)
 		return ERR_PTR(-ENOMEM);
@@ -478,7 +481,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 	if (!mthca_is_memfree(dev))
 		return;

-	for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
+	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -551,20 +554,20 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	page = dev->db_tab->page + end;

alloc:
-	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
+	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 					  &page->mapping, GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(page->db_rec, 0, 4096);
+	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

 	ret = mthca_MAP_ICM_page(dev, page->mapping,
 				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
 	if (!ret && status)
 		ret = -EINVAL;
 	if (ret) {
-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
 		goto out;
 	}
@@ -612,7 +615,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 	    i >= dev->db_tab->max_group1 - 1) {
 		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
 		page->db_rec = NULL;
@@ -640,7 +643,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)

 	mutex_init(&dev->db_tab->mutex);

-	dev->db_tab->npages     = dev->uar_table.uarc_size / 4096;
+	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
 	dev->db_tab->max_group1 = 0;
 	dev->db_tab->min_group2 = dev->db_tab->npages - 1;
@@ -681,7 +684,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)

 		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  dev->db_tab->page[i].db_rec,
 				  dev->db_tab->page[i].mapping);
 	}
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -45,6 +45,12 @@
 	((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
 	 (sizeof (struct scatterlist)))

+enum {
+	MTHCA_ICM_PAGE_SHIFT	= 12,
+	MTHCA_ICM_PAGE_SIZE	= 1 << MTHCA_ICM_PAGE_SHIFT,
+	MTHCA_DB_REC_PER_PAGE	= MTHCA_ICM_PAGE_SIZE / 8
+};
+
 struct mthca_icm_chunk {
 	struct list_head list;
 	int npages;
@@ -131,10 +137,6 @@ static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
 	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
 }

-enum {
-	MTHCA_DB_REC_PER_PAGE = 4096 / 8
-};
-
 struct mthca_db_page {
 	DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
 	__be64 *db_rec;
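
Note that moving MTHCA_DB_REC_PER_PAGE into the shared enum does not change its value: each doorbell record is an 8-byte __be64, so one 4 KB HCA page holds MTHCA_ICM_PAGE_SIZE / 8 = 512 records regardless of the kernel's PAGE_SIZE.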