Commit cc491d8e authored by Daniel Jordan, committed by Herbert Xu

padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible

With pcrypt's cpumask no longer used, take the CPU hotplug lock inside
padata_alloc_possible.

Useful later in the series for avoiding nested acquisition of the CPU
hotplug lock in padata when padata_alloc_possible is allocating an
unbound workqueue.

Without this patch, this nested acquisition would happen later in the
series:

      pcrypt_init_padata
        get_online_cpus
        padata_alloc_possible
          padata_alloc
            alloc_workqueue(WQ_UNBOUND)   // later in the series
              alloc_and_link_pwqs
                apply_wqattrs_lock
                  get_online_cpus         // recursive rwsem acquisition
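
For illustration, a minimal before/after sketch of the caller pattern this change
enables. This is a hypothetical caller written for this description, not actual
pcrypt code; it only uses functions that appear in the patch below.

      #include <linux/cpu.h>
      #include <linux/padata.h>

      /* Hypothetical example, not kernel source.
       *
       * Before this patch: the caller must hold the CPU hotplug lock across the
       * allocation ("Must be called from a cpus_read_lock() protected region"),
       * which later in the series would nest with the get_online_cpus() taken
       * internally by alloc_workqueue(WQ_UNBOUND).
       */
      static struct padata_instance *example_alloc_before(const char *name)
      {
              struct padata_instance *pinst;

              get_online_cpus();
              pinst = padata_alloc_possible(name);
              put_online_cpus();

              return pinst;
      }

      /* After this patch: padata_alloc_possible() takes and releases the CPU
       * hotplug lock internally, so the caller no longer touches it.
       */
      static struct padata_instance *example_alloc_after(const char *name)
      {
              return padata_alloc_possible(name);
      }
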
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 63d35788
crypto/pcrypt.c
@@ -308,8 +308,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 {
 	int ret = -ENOMEM;
 
-	get_online_cpus();
-
 	*pinst = padata_alloc_possible(name);
 	if (!*pinst)
 		return ret;
@@ -318,8 +316,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 	if (ret)
 		padata_free(*pinst);
 
-	put_online_cpus();
-
 	return ret;
 }
kernel/padata.c
@@ -955,8 +955,6 @@ static struct kobj_type padata_attr_type = {
  * @name: used to identify the instance
  * @pcpumask: cpumask that will be used for padata parallelization
  * @cbcpumask: cpumask that will be used for padata serialization
- *
- * Must be called from a cpus_read_lock() protected region
  */
 static struct padata_instance *padata_alloc(const char *name,
 				const struct cpumask *pcpumask,
@@ -974,11 +972,13 @@ static struct padata_instance *padata_alloc(const char *name,
 	if (!pinst->wq)
 		goto err_free_inst;
 
+	get_online_cpus();
+
 	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
-		goto err_free_wq;
+		goto err_put_cpus;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pinst->cpumask.pcpu);
-		goto err_free_wq;
+		goto err_put_cpus;
 	}
 	if (!padata_validate_cpumask(pinst, pcpumask) ||
 	    !padata_validate_cpumask(pinst, cbcpumask))
@@ -1002,12 +1002,16 @@ static struct padata_instance *padata_alloc(const char *name,
 #ifdef CONFIG_HOTPLUG_CPU
 	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
 #endif
+
+	put_online_cpus();
+
 	return pinst;
 
 err_free_masks:
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
-err_free_wq:
+err_put_cpus:
+	put_online_cpus();
 	destroy_workqueue(pinst->wq);
 err_free_inst:
 	kfree(pinst);
@@ -1021,12 +1025,9 @@ static struct padata_instance *padata_alloc(const char *name,
  * parallel workers.
  *
  * @name: used to identify the instance
- *
- * Must be called from a cpus_read_lock() protected region
  */
 struct padata_instance *padata_alloc_possible(const char *name)
 {
-	lockdep_assert_cpus_held();
 	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);