Commit b57aacfa authored by Roland Dreier

mlx4_core: Clean up struct mlx4_buf

Now that struct mlx4_buf.u is a struct instead of a union because of
the vmap() changes, there's no point in having a struct at all.  So
move .direct and .page_list directly into struct mlx4_buf and get rid
of a bunch of unnecessary ".u"s.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 313abe55
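
For reference, this is roughly what the cleaned-up structure looks like once the wrapper is gone. It is a sketch assembled from the hunks below; struct mlx4_buf_list itself is not part of this diff, and its (buf, map) members are assumed here from how the allocation code uses them:

struct mlx4_buf_list {
	void		*buf;	/* kernel virtual address of one chunk (assumed layout) */
	dma_addr_t	 map;	/* DMA address of that chunk, as filled in via dma_alloc_coherent() */
};

struct mlx4_buf {
	struct mlx4_buf_list	 direct;	/* single coherent chunk, or the vmap()ed view */
	struct mlx4_buf_list	*page_list;	/* per-page chunks when the buffer is not direct */
	int			 nbufs;
	int			 npages;
	int			 page_shift;
};

With the anonymous "struct { ... } u" wrapper removed, every access simply drops the ".u", which is all the hunks below do.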
@@ -116,40 +116,40 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->nbufs        = 1;
 		buf->npages       = 1;
 		buf->page_shift   = get_order(size) + PAGE_SHIFT;
-		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
 						       size, &t, GFP_KERNEL);
-		if (!buf->u.direct.buf)
+		if (!buf->direct.buf)
 			return -ENOMEM;
 
-		buf->u.direct.map = t;
+		buf->direct.map = t;
 
 		while (t & ((1 << buf->page_shift) - 1)) {
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
 
-		memset(buf->u.direct.buf, 0, size);
+		memset(buf->direct.buf, 0, size);
 	} else {
 		int i;
 
 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
-		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+		buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
 					   GFP_KERNEL);
-		if (!buf->u.page_list)
+		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->u.page_list[i].buf =
+			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
 						   &t, GFP_KERNEL);
-			if (!buf->u.page_list[i].buf)
+			if (!buf->page_list[i].buf)
 				goto err_free;
 
-			buf->u.page_list[i].map = t;
+			buf->page_list[i].map = t;
 
-			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
 		}
 
 		if (BITS_PER_LONG == 64) {
@@ -158,10 +158,10 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 			if (!pages)
 				goto err_free;
 			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->u.page_list[i].buf);
-			buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+				pages[i] = virt_to_page(buf->page_list[i].buf);
+			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
 			kfree(pages);
-			if (!buf->u.direct.buf)
+			if (!buf->direct.buf)
 				goto err_free;
 		}
 	}
@@ -180,18 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 	int i;
 
 	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-				  buf->u.direct.map);
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  buf->direct.map);
 	else {
 		if (BITS_PER_LONG == 64)
-			vunmap(buf->u.direct.buf);
+			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
-			if (buf->u.page_list[i].buf)
+			if (buf->page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->u.page_list[i].buf,
-						  buf->u.page_list[i].map);
-		kfree(buf->u.page_list);
+						  buf->page_list[i].buf,
+						  buf->page_list[i].map);
+		kfree(buf->page_list);
 	}
 }
 
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 
 	for (i = 0; i < buf->npages; ++i)
 		if (buf->nbufs == 1)
-			page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+			page_list[i] = buf->direct.map + (i << buf->page_shift);
 		else
-			page_list[i] = buf->u.page_list[i].map;
+			page_list[i] = buf->page_list[i].map;
 
 	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
@@ -189,10 +189,8 @@ struct mlx4_buf_list {
 };
 
 struct mlx4_buf {
-	struct {
-		struct mlx4_buf_list	direct;
-		struct mlx4_buf_list   *page_list;
-	} u;
+	struct mlx4_buf_list	direct;
+	struct mlx4_buf_list   *page_list;
 	int			nbufs;
 	int			npages;
 	int			page_shift;
@@ -311,9 +309,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
 	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-		return buf->u.direct.buf + offset;
+		return buf->direct.buf + offset;
 	else
-		return buf->u.page_list[offset >> PAGE_SHIFT].buf +
+		return buf->page_list[offset >> PAGE_SHIFT].buf +
 			(offset & (PAGE_SIZE - 1));
 }
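
To show the renamed fields from a caller's point of view, here is a minimal, hypothetical sketch that exercises the API touched by this patch. The function name, the buffer size, and the 2 * PAGE_SIZE max_direct value are illustrative placeholders; only mlx4_buf_alloc(), mlx4_buf_offset(), mlx4_buf_free(), and the struct fields come from the code above:

/* Hypothetical example only: allocate a buffer, touch a word in its
 * second page through mlx4_buf_offset(), then free it. */
static int example_use_mlx4_buf(struct mlx4_dev *dev, int buf_size)
{
	struct mlx4_buf buf;
	u32 *slot;
	int err;

	/* max_direct caps how large a buffer may still be allocated as one
	 * coherent chunk (buf.direct); larger buffers become a page list
	 * (buf.page_list), vmap()ed into buf.direct.buf on 64-bit kernels
	 * (behaviour inferred from the allocation path above). */
	err = mlx4_buf_alloc(dev, buf_size, 2 * PAGE_SIZE, &buf);
	if (err)
		return err;

	/* mlx4_buf_offset() hides which of the two layouts is in use;
	 * assumes buf_size > PAGE_SIZE + 8. */
	slot = mlx4_buf_offset(&buf, PAGE_SIZE + 8);
	*slot = 0;

	mlx4_buf_free(dev, buf_size, &buf);
	return 0;
}

Note that the choice between the direct and page-list layouts is made inside mlx4_buf_alloc() based on max_direct; callers never select it explicitly, which is why dropping the ".u" wrapper touches no caller logic.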