Commit 8f6b7676 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 - Fix for CPU hotplug hang in padata.
 - Avoid using cpu_active inappropriately in pcrypt and padata.
 - Fix for user-space algorithm lookup hang with IV generators.
 - Fix for netlink dump of algorithms where stuff went missing due to
   incorrect calculation of message size.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: user - Fix size of netlink dump message
  crypto: user - Fix lookup of algorithms with IV generator
  crypto: pcrypt - Use the online cpumask as the default
  padata: Fix cpu hotplug
  padata: Use the online cpumask as the default
  padata: Add a reference to the api documentation
parents 143418d0 5219a534
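
To make the user-space side of the last two fixes concrete, here is a minimal illustrative sketch (not part of this merge) of a program talking to the NETLINK_CRYPTO socket served by crypto_user.c: it sends CRYPTO_MSG_GETALG with NLM_F_DUMP and prints the cru_name of every dumped algorithm. It assumes <linux/cryptouser.h> is usable from user space on the build host, typically needs CAP_NET_ADMIN, and trims error handling; the per-entry report attributes whose worst case the new CRYPTO_REPORT_MAXSIZE bounds are not parsed here.

/* Hedged sketch: dump all registered crypto algorithms via NETLINK_CRYPTO.
 * Assumptions: <linux/cryptouser.h> is available to userspace builds and the
 * caller has CAP_NET_ADMIN; error handling is intentionally minimal. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct crypto_user_alg cru;
	} req;
	char buf[65536];
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

	if (fd < 0)
		return 1;

	/* GETALG dump request: an empty crypto_user_alg matches everything. */
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
	req.nlh.nlmsg_type = CRYPTO_MSG_GETALG;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	/* Read multi-part dump replies until NLMSG_DONE (or an error). */
	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE ||
			    nlh->nlmsg_type == NLMSG_ERROR)
				goto out;
			printf("%s\n",
			       ((struct crypto_user_alg *)NLMSG_DATA(nlh))->cru_name);
		}
	}
out:
	close(fd);
	return 0;
}
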
@@ -613,8 +613,7 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
         return err;
 }
 
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
-                                                 u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
 {
         struct crypto_alg *alg;
 
@@ -652,6 +651,7 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
 
         return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
 
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                          u32 type, u32 mask)
@@ -470,8 +470,7 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
         return err;
 }
 
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
-                                             u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
 {
         struct crypto_alg *alg;
 
@@ -503,6 +502,7 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
 
         return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                      u32 type, u32 mask)
@@ -21,9 +21,13 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/cryptouser.h>
+#include <linux/sched.h>
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
 #include "internal.h"
 
 DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
         return crypto_unregister_instance(alg);
 }
 
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+                                                   u32 mask)
+{
+        int err;
+        struct crypto_alg *alg;
+
+        type = crypto_skcipher_type(type);
+        mask = crypto_skcipher_mask(mask);
+
+        for (;;) {
+                alg = crypto_lookup_skcipher(name, type, mask);
+                if (!IS_ERR(alg))
+                        return alg;
+
+                err = PTR_ERR(alg);
+                if (err != -EAGAIN)
+                        break;
+                if (signal_pending(current)) {
+                        err = -EINTR;
+                        break;
+                }
+        }
+
+        return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+                                               u32 mask)
+{
+        int err;
+        struct crypto_alg *alg;
+
+        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+        type |= CRYPTO_ALG_TYPE_AEAD;
+        mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+        mask |= CRYPTO_ALG_TYPE_MASK;
+
+        for (;;) {
+                alg = crypto_lookup_aead(name, type, mask);
+                if (!IS_ERR(alg))
+                        return alg;
+
+                err = PTR_ERR(alg);
+                if (err != -EAGAIN)
+                        break;
+                if (signal_pending(current)) {
+                        err = -EINTR;
+                        break;
+                }
+        }
+
+        return ERR_PTR(err);
+}
+
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct nlattr **attrs)
 {
@@ -325,7 +383,19 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
         else
                 name = p->cru_name;
 
-        alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+        switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+        case CRYPTO_ALG_TYPE_AEAD:
+                alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+                break;
+        case CRYPTO_ALG_TYPE_GIVCIPHER:
+        case CRYPTO_ALG_TYPE_BLKCIPHER:
+        case CRYPTO_ALG_TYPE_ABLKCIPHER:
+                alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+                break;
+        default:
+                alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+        }
+
         if (IS_ERR(alg))
                 return PTR_ERR(alg);
@@ -387,12 +457,20 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
         if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
             (nlh->nlmsg_flags & NLM_F_DUMP))) {
+                struct crypto_alg *alg;
+                u16 dump_alloc = 0;
+
                 if (link->dump == NULL)
                         return -EINVAL;
+
+                list_for_each_entry(alg, &crypto_alg_list, cra_list)
+                        dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
                 {
                         struct netlink_dump_control c = {
                                 .dump = link->dump,
                                 .done = link->done,
+                                .min_dump_alloc = dump_alloc,
                         };
                         return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                 }
@@ -280,11 +280,11 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
         ictx->tfm_count++;
-        cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+        cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
 
-        ctx->cb_cpu = cpumask_first(cpu_active_mask);
+        ctx->cb_cpu = cpumask_first(cpu_online_mask);
         for (cpu = 0; cpu < cpu_index; cpu++)
-                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
 
         cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
 
@@ -472,7 +472,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
                 goto err_free_padata;
         }
 
-        cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+        cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
         rcu_assign_pointer(pcrypt->cb_cpumask, mask);
 
         pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
@@ -31,6 +31,8 @@ static inline void crypto_set_aead_spawn(
         crypto_set_spawn(&spawn->base, inst);
 }
 
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                      u32 type, u32 mask);
@@ -34,6 +34,8 @@ static inline void crypto_set_skcipher_spawn(
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                          u32 type, u32 mask);
 
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
 static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
 {
         crypto_drop_spawn(&spawn->base);
@@ -100,3 +100,6 @@ struct crypto_report_rng {
         char type[CRYPTO_MAX_NAME];
         unsigned int seedsize;
 };
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+                               sizeof(struct crypto_report_blkcipher))
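
CRYPTO_REPORT_MAXSIZE is sized from crypto_report_blkcipher on the assumption that it is the largest per-algorithm report structure a dump entry can carry. A hedged user-space sketch (not part of this merge) that checks that assumption at compile time for the other report structs is shown below; it ignores netlink attribute header overhead and assumes <linux/cryptouser.h>, as patched here, is on the include path of a C11 build.

/* Hedged sketch: compile-time check that each (crypto_user_alg + report
 * struct) payload fits in CRYPTO_REPORT_MAXSIZE. Assumes the patched
 * <linux/cryptouser.h> is available to userspace and a C11 compiler. */
#include <linux/cryptouser.h>

#define REPORT_FITS(t) \
	_Static_assert(sizeof(struct crypto_user_alg) + sizeof(struct t) <= \
		       CRYPTO_REPORT_MAXSIZE, #t " would overflow CRYPTO_REPORT_MAXSIZE")

REPORT_FITS(crypto_report_larval);
REPORT_FITS(crypto_report_cipher);
REPORT_FITS(crypto_report_comp);
REPORT_FITS(crypto_report_hash);
REPORT_FITS(crypto_report_rng);
REPORT_FITS(crypto_report_aead);

int main(void)
{
	return 0;
}
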
@@ -1,6 +1,8 @@
 /*
  * padata.c - generic interface to process data streams in parallel
  *
+ * See Documentation/padata.txt for an api documentation.
+ *
  * Copyright (C) 2008, 2009 secunet Security Networks AG
  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
@@ -354,13 +356,13 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
         if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                 return -ENOMEM;
 
-        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
         if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                 free_cpumask_var(pd->cpumask.cbcpu);
                 return -ENOMEM;
         }
 
-        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
 
         return 0;
 }
@@ -564,7 +566,7 @@ EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
 static bool padata_validate_cpumask(struct padata_instance *pinst,
                                     const struct cpumask *cpumask)
 {
-        if (!cpumask_intersects(cpumask, cpu_active_mask)) {
+        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                 pinst->flags |= PADATA_INVALID;
                 return false;
         }
@@ -678,7 +680,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
         struct parallel_data *pd;
 
-        if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                      pinst->cpumask.cbcpu);
                 if (!pd)
@@ -746,6 +748,9 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
                         return -ENOMEM;
 
                 padata_replace(pinst, pd);
+
+                cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
+                cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
         }
 
         return 0;