Commit 313abe55 authored by Jack Morgenstein, committed by Roland Dreier

mlx4_core: For 64-bit systems, vmap() kernel queue buffers

Since kernel virtual memory is not a problem on 64-bit systems, there
is no reason to use our own 2-layer page mapping scheme for large
kernel queue buffers on such systems.  Instead, map the page list to a
single virtually contiguous buffer with vmap(), so that we can access
buffer memory via direct indexing.
Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 1c69fc2a
...@@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, ...@@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
} }
if (BITS_PER_LONG == 64) {
struct page **pages;
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
if (!pages)
goto err_free;
for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->u.page_list[i].buf);
buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!buf->u.direct.buf)
goto err_free;
}
} }
return 0; return 0;
...@@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) ...@@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
buf->u.direct.map); buf->u.direct.map);
else { else {
if (BITS_PER_LONG == 64)
vunmap(buf->u.direct.buf);
for (i = 0; i < buf->nbufs; ++i) for (i = 0; i < buf->nbufs; ++i)
if (buf->u.page_list[i].buf) if (buf->u.page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
......
...@@ -189,7 +189,7 @@ struct mlx4_buf_list { ...@@ -189,7 +189,7 @@ struct mlx4_buf_list {
}; };
struct mlx4_buf { struct mlx4_buf {
union { struct {
struct mlx4_buf_list direct; struct mlx4_buf_list direct;
struct mlx4_buf_list *page_list; struct mlx4_buf_list *page_list;
} u; } u;
...@@ -310,7 +310,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, ...@@ -310,7 +310,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{ {
if (buf->nbufs == 1) if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return buf->u.direct.buf + offset; return buf->u.direct.buf + offset;
else else
return buf->u.page_list[offset >> PAGE_SHIFT].buf + return buf->u.page_list[offset >> PAGE_SHIFT].buf +
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment