Commit 994ea2f4 authored by Cédric Le Goater, committed by Michael Ellerman

powerpc/xive: introduce a common routine xive_queue_page_alloc()

This routine will be used in the spapr backend. Also introduce a short
xive_alloc_order() helper.
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 3b79b261
...@@ -1428,6 +1428,22 @@ bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 o ...@@ -1428,6 +1428,22 @@ bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 o
return true; return true;
} }
/*
 * xive_queue_page_alloc - allocate and zero an event queue buffer
 * @cpu: CPU the queue belongs to; backing pages are allocated on its
 *       NUMA node (cpu_to_node())
 * @queue_shift: log2 of the queue size in bytes
 *
 * Allocates enough contiguous pages to hold (1 << queue_shift) bytes
 * and zeroes that many bytes of the buffer.
 *
 * Returns the page-aligned buffer address, or ERR_PTR(-ENOMEM) if the
 * page allocation fails. Callers must check with IS_ERR().
 */
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	/* Order 0 for queues up to one page, larger orders beyond that */
	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	/* Only the queue-sized prefix is cleared, not the whole order */
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}
static int __init xive_off(char *arg) static int __init xive_off(char *arg)
{ {
xive_cmdline_disabled = true; xive_cmdline_disabled = true;
......
...@@ -202,17 +202,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue); ...@@ -202,17 +202,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio) static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{ {
struct xive_q *q = &xc->queue[prio]; struct xive_q *q = &xc->queue[prio];
unsigned int alloc_order;
struct page *pages;
__be32 *qpage; __be32 *qpage;
alloc_order = (xive_queue_shift > PAGE_SHIFT) ? qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
(xive_queue_shift - PAGE_SHIFT) : 0; if (IS_ERR(qpage))
pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); return PTR_ERR(qpage);
if (!pages)
return -ENOMEM;
qpage = (__be32 *)page_address(pages);
memset(qpage, 0, 1 << xive_queue_shift);
return xive_native_configure_queue(get_hard_smp_processor_id(cpu), return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
q, prio, qpage, xive_queue_shift, false); q, prio, qpage, xive_queue_shift, false);
} }
...@@ -227,8 +222,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 ...@@ -227,8 +222,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8
* from an IPI and iounmap isn't safe * from an IPI and iounmap isn't safe
*/ */
__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio); __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
alloc_order = (xive_queue_shift > PAGE_SHIFT) ? alloc_order = xive_alloc_order(xive_queue_shift);
(xive_queue_shift - PAGE_SHIFT) : 0;
free_pages((unsigned long)q->qpage, alloc_order); free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL; q->qpage = NULL;
} }
......
...@@ -56,6 +56,12 @@ struct xive_ops { ...@@ -56,6 +56,12 @@ struct xive_ops {
bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset, bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
u8 max_prio); u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
/*
 * xive_alloc_order - page allocation order for a 2^queue_shift byte queue
 *
 * A queue smaller than (or equal to) one page still needs a whole page,
 * hence order 0 in that case.
 */
static inline u32 xive_alloc_order(u32 queue_shift)
{
	if (queue_shift > PAGE_SHIFT)
		return queue_shift - PAGE_SHIFT;

	return 0;
}
extern bool xive_cmdline_disabled; extern bool xive_cmdline_disabled;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment