Commit 8688ff94 authored by Roland Dreier, committed by Linus Torvalds

[PATCH] IB/mthca: refactor CQ buffer allocate/free

Factor the allocation and freeing of completion queue buffers into
mthca_alloc_cq_buf() and mthca_free_cq_buf().  This makes the code
more readable and will eventually make handling userspace CQs simpler
(the kernel doesn't have to allocate a buffer at all).
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2e922f5a
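
The shape of the refactor, stated outside kernel context: a CQ buffer is either one contiguous DMA allocation ("direct") or an array of page-sized chunks ("indirect"), and after this patch both branches of allocation and teardown sit behind a single helper pair, so every caller and every error path frees the buffer the same way. Below is a minimal user-space sketch of that structure, not driver code: cq_buf, cq_buf_alloc(), cq_buf_free() and MAX_DIRECT_SIZE are invented names, and plain malloc() stands in for pci_alloc_consistent().

#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096
#define MAX_DIRECT_SIZE  8192    /* invented analogue of MTHCA_MAX_DIRECT_CQ_SIZE */

/* Invented analogue of the driver's queue storage: either one contiguous
 * chunk or a list of page-sized chunks. */
struct cq_buf {
        int    is_direct;
        int    size;
        void  *direct;           /* one chunk when is_direct */
        void **page_list;        /* page-sized chunks otherwise */
        int    npages;
};

/* Analogue of mthca_free_cq_buf(): also safe on partially built buffers,
 * because unfilled page slots are NULL and free(NULL) is a no-op. */
static void cq_buf_free(struct cq_buf *buf)
{
        int i;

        if (buf->is_direct)
                free(buf->direct);
        else {
                for (i = 0; i < buf->npages; ++i)
                        free(buf->page_list[i]);
                free(buf->page_list);
        }
}

/* Analogue of mthca_alloc_cq_buf(): both branches live in one place,
 * and the failure path reuses the free helper to unwind. */
static int cq_buf_alloc(struct cq_buf *buf, int size)
{
        int i;

        buf->size = size;
        if (size <= MAX_DIRECT_SIZE) {
                buf->is_direct = 1;
                buf->direct    = malloc(size);
                return buf->direct ? 0 : -1;
        }

        buf->is_direct = 0;
        buf->npages    = (size + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
        buf->page_list = calloc(buf->npages, sizeof *buf->page_list);
        if (!buf->page_list)
                return -1;

        for (i = 0; i < buf->npages; ++i) {
                buf->page_list[i] = malloc(SKETCH_PAGE_SIZE);
                if (!buf->page_list[i]) {
                        cq_buf_free(buf);   /* single teardown point, as in the patch */
                        return -1;
                }
        }
        return 0;
}

The driver version additionally records the DMA unmap addresses and builds the dma_list passed to the memory-region registration, but the control flow is the same.
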
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -557,32 +557,40 @@ void mthca_arm_cq(struct mthca_dev *dev, struct mthca_cq *cq,
                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 }
 
-int mthca_init_cq(struct mthca_dev *dev, int nent,
-                  struct mthca_cq *cq)
+static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 {
-        int size = nent * MTHCA_CQ_ENTRY_SIZE;
-        dma_addr_t t;
-        void *mailbox = NULL;
-        int npages, shift;
-        u64 *dma_list = NULL;
-        struct mthca_cq_context *cq_context;
-        int err = -ENOMEM;
-        u8 status;
         int i;
+        int size;
 
-        might_sleep();
+        if (cq->is_direct)
+                pci_free_consistent(dev->pdev,
+                                    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+                                    cq->queue.direct.buf,
+                                    pci_unmap_addr(&cq->queue.direct,
+                                                   mapping));
+        else {
+                size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
+                for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
+                        if (cq->queue.page_list[i].buf)
+                                pci_free_consistent(dev->pdev, PAGE_SIZE,
+                                                    cq->queue.page_list[i].buf,
+                                                    pci_unmap_addr(&cq->queue.page_list[i],
+                                                                   mapping));
 
-        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
-                          GFP_KERNEL);
-        if (!mailbox)
-                goto err_out;
+                kfree(cq->queue.page_list);
+        }
+}
 
-        cq_context = MAILBOX_ALIGN(mailbox);
+static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
+                              struct mthca_cq *cq)
+{
+        int err = -ENOMEM;
+        int npages, shift;
+        u64 *dma_list = NULL;
+        dma_addr_t t;
+        int i;
 
         if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
-                if (0)
-                        mthca_dbg(dev, "Creating direct CQ of size %d\n", size);
-
                 cq->is_direct = 1;
                 npages        = 1;
                 shift         = get_order(size) + PAGE_SHIFT;
@@ -590,7 +598,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                 cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
                                                             size, &t);
                 if (!cq->queue.direct.buf)
-                        goto err_out;
+                        return -ENOMEM;
 
                 pci_unmap_addr_set(&cq->queue.direct, mapping, t);
@@ -603,7 +611,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                 if (!dma_list)
-                        goto err_out_free;
+                        goto err_free;
 
                 for (i = 0; i < npages; ++i)
                         dma_list[i] = t + i * (1 << shift);
@@ -612,12 +620,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                 npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                 shift         = PAGE_SHIFT;
 
-                if (0)
-                        mthca_dbg(dev, "Creating indirect CQ with %d pages\n", npages);
-
                 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                 if (!dma_list)
-                        goto err_out;
+                        return -ENOMEM;
 
                 cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
                                               GFP_KERNEL);
@@ -631,7 +636,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                         cq->queue.page_list[i].buf =
                                 pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
                         if (!cq->queue.page_list[i].buf)
-                                goto err_out_free;
+                                goto err_free;
 
                         dma_list[i] = t;
                         pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
@@ -640,13 +645,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                 }
         }
 
-        for (i = 0; i < nent; ++i)
-                set_cqe_hw(get_cqe(cq, i));
-
-        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
-        if (cq->cqn == -1)
-                goto err_out_free;
-
         err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                   dma_list, shift, npages,
                                   0, size,
@@ -654,7 +652,52 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                                   MTHCA_MPT_FLAG_LOCAL_READ,
                                   &cq->mr);
         if (err)
-                goto err_out_free_cq;
+                goto err_free;
+
+        kfree(dma_list);
+
+        return 0;
+
+err_free:
+        mthca_free_cq_buf(dev, cq);
+
+err_out:
+        kfree(dma_list);
+
+        return err;
+}
+
+int mthca_init_cq(struct mthca_dev *dev, int nent,
+                  struct mthca_cq *cq)
+{
+        int size = nent * MTHCA_CQ_ENTRY_SIZE;
+        void *mailbox = NULL;
+        struct mthca_cq_context *cq_context;
+        int err = -ENOMEM;
+        u8 status;
+        int i;
+
+        might_sleep();
+
+        cq->ibcq.cqe = nent - 1;
+
+        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
+        if (cq->cqn == -1)
+                return -ENOMEM;
+
+        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
+                          GFP_KERNEL);
+        if (!mailbox)
+                goto err_out;
+
+        cq_context = MAILBOX_ALIGN(mailbox);
+
+        err = mthca_alloc_cq_buf(dev, size, cq);
+        if (err)
+                goto err_out_mailbox;
+
+        for (i = 0; i < nent; ++i)
+                set_cqe_hw(get_cqe(cq, i));
+
         spin_lock_init(&cq->lock);
         atomic_set(&cq->refcount, 1);
@@ -697,37 +740,20 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
         cq->cons_index = 0;
 
-        kfree(dma_list);
         kfree(mailbox);
 
         return 0;
 
 err_out_free_mr:
         mthca_free_mr(dev, &cq->mr);
+        mthca_free_cq_buf(dev, cq);
 
-err_out_free_cq:
-        mthca_free(&dev->cq_table.alloc, cq->cqn);
-
-err_out_free:
-        if (cq->is_direct)
-                pci_free_consistent(dev->pdev, size,
-                                    cq->queue.direct.buf,
-                                    pci_unmap_addr(&cq->queue.direct, mapping));
-        else {
-                for (i = 0; i < npages; ++i)
-                        if (cq->queue.page_list[i].buf)
-                                pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                                    cq->queue.page_list[i].buf,
-                                                    pci_unmap_addr(&cq->queue.page_list[i],
-                                                                   mapping));
-
-                kfree(cq->queue.page_list);
-        }
-
-err_out:
-        kfree(dma_list);
+err_out_mailbox:
         kfree(mailbox);
 
+err_out:
+        mthca_free(&dev->cq_table.alloc, cq->cqn);
+
         return err;
 }
@@ -778,27 +804,7 @@ void mthca_free_cq(struct mthca_dev *dev,
         wait_event(cq->wait, !atomic_read(&cq->refcount));
 
         mthca_free_mr(dev, &cq->mr);
-
-        if (cq->is_direct)
-                pci_free_consistent(dev->pdev,
-                                    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-                                    cq->queue.direct.buf,
-                                    pci_unmap_addr(&cq->queue.direct,
-                                                   mapping));
-        else {
-                int i;
-
-                for (i = 0;
-                     i < ((cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE + PAGE_SIZE - 1) /
-                             PAGE_SIZE;
-                     ++i)
-                        pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                            cq->queue.page_list[i].buf,
-                                            pci_unmap_addr(&cq->queue.page_list[i],
-                                                           mapping));
-
-                kfree(cq->queue.page_list);
-        }
+        mthca_free_cq_buf(dev, cq);
 
         mthca_free(&dev->cq_table.alloc, cq->cqn);
         kfree(mailbox);
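
Inside mthca_alloc_cq_buf() above, note that err_free falls through into err_out: freeing the buffer implies also freeing dma_list, so a later failure unwinds strictly more than an earlier one (err_out itself is presumably reached from the page_list allocation in the elided context). A standalone illustration of that stacked-label idiom, with invented resource names, plain malloc() in place of the driver's allocators, and register_buf() as an invented stand-in for mthca_mr_alloc_phys():

#include <stdlib.h>

/* Invented stand-in for a registration step that can fail. */
static int register_buf(char *buf, char *meta)
{
        (void)buf;
        (void)meta;
        return 0;
}

/* Stacked error labels: each label releases one more resource and falls
 * through to the next, so the goto target encodes how far setup got. */
static int setup(char **out_buf)
{
        char *meta, *buf;

        meta = malloc(64);              /* temporary, like dma_list */
        if (!meta)
                return -1;

        buf = malloc(4096);             /* the long-lived buffer */
        if (!buf)
                goto err_out;

        if (register_buf(buf, meta) < 0)
                goto err_free;          /* undo buf, then fall into err_out */

        free(meta);                     /* success also discards the temporary */
        *out_buf = buf;
        return 0;

err_free:
        free(buf);
err_out:
        free(meta);
        return -1;
}
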
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -408,8 +408,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries)
         if (err) {
                 kfree(cq);
                 cq = ERR_PTR(err);
-        } else
-                cq->ibcq.cqe = nent - 1;
+        }
 
         return &cq->ibcq;
 }
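
A detail that ties the two files together: mthca_free_cq_buf() computes the buffer size from cq->ibcq.cqe, so mthca_init_cq() now sets cq->ibcq.cqe = nent - 1 before allocating, and the assignment mthca_create_cq() used to do on success becomes redundant and is dropped. To make the helper pairing concrete, here is a throwaway harness for the user-space sketch near the top of this page; it exercises both the direct and the page-list branch (the entry size of 32 bytes is an arbitrary choice for the sketch):

#include <stdio.h>

int main(void)
{
        struct cq_buf small, large;

        /* 64 entries * 32 bytes = 2048: takes the direct branch. */
        if (cq_buf_alloc(&small, 64 * 32) == 0) {
                printf("direct: %d bytes in one chunk\n", small.size);
                cq_buf_free(&small);
        }

        /* 4096 entries * 32 bytes = 128 KiB: takes the page-list branch. */
        if (cq_buf_alloc(&large, 4096 * 32) == 0) {
                printf("indirect: %d pages of %d bytes\n",
                       large.npages, SKETCH_PAGE_SIZE);
                cq_buf_free(&large);
        }
        return 0;
}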