Commit 30066ce6 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.9:

  API:
   - The crypto engine code now supports hashes.

  Algorithms:
   - Allow keys >= 2048 bits in FIPS mode for RSA.

  Drivers:
   - Memory overwrite fix for vmx ghash.
   - Add support for building ARM sha1-neon in Thumb2 mode.
   - Reenable ARM ghash-ce code by adding import/export.
   - Reenable img-hash by adding import/export.
   - Add support for multiple cores in omap-aes.
   - Add little-endian support for sha1-powerpc.
   - Add Cavium HWRNG driver for ThunderX SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: caam - treat SGT address pointer as u64
  crypto: ccp - Make syslog errors human-readable
  crypto: ccp - clean up data structure
  crypto: vmx - Ensure ghash-generic is enabled
  crypto: testmgr - add guard to dst buffer for ahash_export
  crypto: caam - Unmap region obtained by of_iomap
  crypto: sha1-powerpc - little-endian support
  crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
  crypto: vmx - Fix memory corruption caused by p8_ghash
  crypto: ghash-generic - move common definitions to a new header file
  crypto: caam - fix sg dump
  hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
  crypto: omap-sham - shrink the internal buffer size
  crypto: omap-sham - add support for export/import
  crypto: omap-sham - convert driver logic to use sgs for data xmit
  crypto: omap-sham - change the DMA threshold value to a define
  crypto: omap-sham - add support functions for sg based data handling
  crypto: omap-sham - rename sgl to sgl_tmp for deprecation
  crypto: omap-sham - align algorithms on word offset
  crypto: omap-sham - add context export/import stubs
  ...
parents 6763afe4 c3afafa4
@@ -797,7 +797,8 @@ kernel crypto API |  Caller
     include/linux/crypto.h and their definition can be seen below.
     The former function registers a single transformation, while
     the latter works on an array of transformation descriptions.
-    The latter is useful when registering transformations in bulk.
+    The latter is useful when registering transformations in bulk,
+    for example when a driver implements multiple transformations.
    </para>
    <programlisting>
@@ -822,18 +823,31 @@ kernel crypto API |  Caller
    </para>
    <para>
-    The bulk registration / unregistration functions require
-    that struct crypto_alg is an array of count size. These
-    functions simply loop over that array and register /
-    unregister each individual algorithm. If an error occurs,
-    the loop is terminated at the offending algorithm definition.
-    That means, the algorithms prior to the offending algorithm
-    are successfully registered. Note, the caller has no way of
-    knowing which cipher implementations have successfully
-    registered. If this is important to know, the caller should
-    loop through the different implementations using the single
-    instance *_alg functions for each individual implementation.
+    The bulk registration/unregistration functions
+    register/unregister each transformation in the given array of
+    length count. They handle errors as follows:
    </para>
+   <itemizedlist>
+    <listitem>
+     <para>
+      crypto_register_algs() succeeds if and only if it
+      successfully registers all the given transformations. If an
+      error occurs partway through, then it rolls back successful
+      registrations before returning the error code. Note that if
+      a driver needs to handle registration errors for individual
+      transformations, then it will need to use the non-bulk
+      function crypto_register_alg() instead.
+     </para>
+    </listitem>
+    <listitem>
+     <para>
+      crypto_unregister_algs() tries to unregister all the given
+      transformations, continuing on error. It logs errors and
+      always returns zero.
+     </para>
+    </listitem>
+   </itemizedlist>
   </sect1>
   <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
......
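As a concrete illustration of the all-or-nothing semantics documented above, a driver exposing several transformations might register them as in the following minimal sketch. The module and the example_algs array are hypothetical, not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>

static struct crypto_alg example_algs[2];	/* filled in elsewhere; illustrative */

static int __init example_init(void)
{
	/*
	 * Either every algorithm registers, or none does: on failure,
	 * crypto_register_algs() unwinds the entries it already added
	 * before returning the error code.
	 */
	return crypto_register_algs(example_algs, ARRAY_SIZE(example_algs));
}

static void __exit example_exit(void)
{
	/* Unregisters every entry, logging (and otherwise ignoring) errors. */
	crypto_unregister_algs(example_algs, ARRAY_SIZE(example_algs));
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");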
@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
 	.setkey			= ghash_setkey,
 	.descsize		= sizeof(struct ghash_desc_ctx),
 	.base			= {
-		.cra_name	= "ghash",
+		.cra_name	= "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority	= 0,
 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
 	}
 }
 
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	return crypto_shash_export(desc, out);
+}
+
 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 			      unsigned int keylen)
 {
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
 	.final			= ghash_async_final,
 	.setkey			= ghash_async_setkey,
 	.digest			= ghash_async_digest,
+	.import			= ghash_async_import,
+	.export			= ghash_async_export,
 	.halg.digestsize	= GHASH_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct ghash_desc_ctx),
 	.halg.base		= {
 		.cra_name	= "ghash",
 		.cra_driver_name = "ghash-ce",
......
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>
 
 .syntax unified
-.code	32
 .fpu neon
 
 .text
......
@@ -7,6 +7,15 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
+#ifdef __BIG_ENDIAN__
+#define LWZ(rt, d, ra)	\
+	lwz	rt,d(ra)
+#else
+#define LWZ(rt, d, ra)	\
+	li	rt,d;	\
+	lwbrx	rt,rt,ra
+#endif
+
 /*
  * We roll the registers for T, A, B, C, D, E around on each
  * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
 #define W(t)	(((t)%16)+16)
 
 #define LOADW(t)			\
-	lwz	W(t),(t)*4(r4)
+	LWZ(W(t),(t)*4,r4)
 
 #define STEPD0_LOAD(t)			\
 	andc	r0,RD(t),RB(t);		\
@@ -33,7 +42,7 @@
 	add	r0,RE(t),r15;		\
 	add	RT(t),RT(t),r6;		\
 	add	r14,r0,W(t);		\
-	LWZ(W((t)+4),((t)+4)*4,r4);	\
 	rotlwi	RB(t),RB(t),30;		\
 	add	RT(t),RT(t),r14
......
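For readers unfamiliar with the idiom: LWZ(rt, d, ra) expands to a plain lwz word load on big-endian builds, while on little-endian builds it expands to the two instructions sketched below, where lwbrx is a byte-reversed indexed load, so the SHA-1 message words are still consumed in big-endian order. The register and offset values here are illustrative only:

	li	r16,4		# rt = d (the byte offset)
	lwbrx	r16,r16,r4	# rt = byte-reversed word at r4 + rt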
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
 	bool has_key;
 };
 
+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (ctx->result)
+		return 0;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result)
+		return -ENOMEM;
+
+	memset(ctx->result, 0, ds);
+
+	return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (!ctx->result)
+		return;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	sock_kzfree_s(sk, ctx->result, ds);
+	ctx->result = NULL;
+}
+
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 	lock_sock(sk);
 	if (!ctx->more) {
+		if ((msg->msg_flags & MSG_MORE))
+			hash_free_result(sk, ctx);
+
 		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
 						&ctx->completion);
 		if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 	ctx->more = msg->msg_flags & MSG_MORE;
 	if (!ctx->more) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);
 
+	if (!(flags & MSG_MORE)) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+	} else if (!ctx->more)
+		hash_free_result(sk, ctx);
+
 	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
 
 	if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	bool result;
 	int err;
 
 	if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		msg->msg_flags |= MSG_TRUNC;
 
 	lock_sock(sk);
+	result = ctx->result;
+	err = hash_alloc_result(sk, ctx);
+	if (err)
+		goto unlock;
+
+	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
 	if (ctx->more) {
 		ctx->more = 0;
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
 		if (err)
 			goto unlock;
+	} else if (!result) {
+		err = af_alg_wait_for_completion(
+				crypto_ahash_digest(&ctx->req),
+				&ctx->completion);
 	}
 
 	err = memcpy_to_msg(msg, ctx->result, len);
 
+	hash_free_result(sk, ctx);
+
 unlock:
 	release_sock(sk);
@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 
-	sock_kzfree_s(sk, ctx->result,
-		      crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	hash_free_result(sk, ctx);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
 	struct algif_hash_tfm *tfm = private;
 	struct crypto_ahash *hash = tfm->hash;
 	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
-	unsigned ds = crypto_ahash_digestsize(hash);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
-	if (!ctx->result) {
-		sock_kfree_s(sk, ctx, len);
-		return -ENOMEM;
-	}
-	memset(ctx->result, 0, ds);
-
+	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
 	af_alg_init_completion(&ctx->completion);
......
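A hedged userspace sketch (not part of this patch) of how algif_hash is driven via AF_ALG, which shows why the kernel side above can defer allocating ctx->result: the digest buffer is only needed once the caller reads the result.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",	/* any registered ahash */
	};
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	/* MSG_MORE keeps the hash open; the final send ends the message. */
	send(opfd, "hello ", 6, MSG_MORE);
	send(opfd, "world", 5, 0);

	/* Reading triggers the final/digest path and returns the digest. */
	read(opfd, digest, sizeof(digest));

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}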
@@ -107,10 +107,7 @@ static struct shash_alg alg = {
 
 static int __init crct10dif_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_shash(&alg);
-	return ret;
+	return crypto_register_shash(&alg);
 }
 
 static void __exit crct10dif_mod_fini(void)
......
@@ -14,13 +14,12 @@
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
 #include "internal.h"
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ablkcipher_request *req;
+	struct ahash_request *hreq;
+	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret;
+	int ret, rtype;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!async_req)
 		goto out;
 
-	req = ablkcipher_request_cast(async_req);
-
-	engine->cur_req = req;
+	engine->cur_req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
+	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 	}
 
-	if (engine->prepare_request) {
-		ret = engine->prepare_request(engine, engine->cur_req);
-		if (ret) {
-			pr_err("failed to prepare request: %d\n", ret);
-			goto req_err;
-		}
-		engine->cur_req_prepared = true;
-	}
-
-	ret = engine->crypt_one_request(engine, engine->cur_req);
-	if (ret) {
-		pr_err("failed to crypt one request from queue\n");
-		goto req_err;
-	}
-	return;
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		if (engine->prepare_hash_request) {
+			ret = engine->prepare_hash_request(engine, hreq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->hash_one_request(engine, hreq);
+		if (ret) {
+			pr_err("failed to hash one request from queue\n");
+			goto req_err;
+		}
+		return;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		if (engine->prepare_cipher_request) {
+			ret = engine->prepare_cipher_request(engine, breq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->cipher_one_request(engine, breq);
+		if (ret) {
+			pr_err("failed to cipher one request from queue\n");
+			goto req_err;
+		}
+		return;
+	default:
+		pr_err("failed to prepare request of unknown type\n");
+		return;
+	}
 
 req_err:
-	crypto_finalize_request(engine, engine->cur_req, ret);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		crypto_finalize_hash_request(engine, hreq, ret);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		crypto_finalize_cipher_request(engine, breq, ret);
+		break;
+	}
 	return;
 
 out:
@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
 }
 
 /**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * enginequeue
  * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
-int crypto_transfer_request(struct crypto_engine *engine,
-			    struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+				   struct ablkcipher_request *req,
+				   bool need_pump)
 {
 	unsigned long flags;
 	int ret;
@@ -162,28 +194,70 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
 
 /**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
-				      struct ablkcipher_request *req)
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+					     struct ablkcipher_request *req)
 {
-	return crypto_transfer_request(engine, req, true);
+	return crypto_transfer_cipher_request(engine, req, true);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
+
+/**
+ * crypto_transfer_hash_request - transfer the new request into the
+ * enginequeue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+				 struct ahash_request *req, bool need_pump)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (!engine->running) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	ret = ahash_enqueue_request(&engine->queue, req);
+
+	if (!engine->busy && need_pump)
+		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
+
+/**
+ * crypto_transfer_hash_request_to_engine - transfer one request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					   struct ahash_request *req)
+{
+	return crypto_transfer_hash_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
 
 /**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_finalize_cipher_request - finalize one request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+				    struct ablkcipher_request *req, int err)
 {
 	unsigned long flags;
@@ -191,17 +265,54 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
+	if (engine->cur_req == &req->base)
 		finalize_cur_req = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
 	if (finalize_cur_req) {
-		if (engine->cur_req_prepared && engine->unprepare_request) {
-			ret = engine->unprepare_request(engine, req);
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_cipher_request) {
+			ret = engine->unprepare_cipher_request(engine, req);
 			if (ret)
 				pr_err("failed to unprepare request\n");
 		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->base.complete(&req->base, err);
+
+	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == &req->base)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_hash_request) {
+			ret = engine->unprepare_hash_request(engine, req);
+			if (ret)
+				pr_err("failed to unprepare request\n");
+		}
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		engine->cur_req = NULL;
 		engine->cur_req_prepared = false;
@@ -212,7 +323,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 	queue_kthread_work(&engine->kworker, &engine->pump_requests);
 }
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
 /**
  * crypto_engine_start - start the hardware engine
@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
 int crypto_engine_stop(struct crypto_engine *engine)
 {
 	unsigned long flags;
-	unsigned limit = 500;
+	unsigned int limit = 500;
 	int ret = 0;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
......
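A hedged driver-side sketch (not from this patch) of how a hardware hash driver plugs into the extended engine. The crypto_transfer_hash_request_to_engine(), crypto_finalize_hash_request() and engine->hash_one_request hooks are the ones added above; the my_* names are hypothetical:

static struct crypto_engine *my_engine;	/* hypothetical driver state */

/* ahash .update/.final entry point: queue the request for the engine. */
static int my_ahash_op(struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(my_engine, req);
}

/* Invoked by the engine kthread through engine->hash_one_request. */
static int my_hash_one_request(struct crypto_engine *engine,
			       struct ahash_request *req)
{
	int err = 0;		/* program the hardware here ... */

	/* Hands the result back and lets the engine pump the next request. */
	crypto_finalize_hash_request(engine, req, err);
	return 0;
}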
@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
 		goto err;
 
 	drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Vbuf)
+	if (!drbg->Vbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
 	drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Cbuf)
+	if (!drbg->Cbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
 	/* scratchpad is only generated for CTR and Hash */
 	if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
 	if (0 < sb_size) {
 		drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
-		if (!drbg->scratchpadbuf)
+		if (!drbg->scratchpadbuf) {
+			ret = -ENOMEM;
 			goto fini;
+		}
 		drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
 	}
@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
 		return -ENOMEM;
 	mutex_init(&drbg->drbg_mutex);
+	drbg->core = &drbg_cores[coreref];
+	drbg->reseed_threshold = drbg_max_requests(drbg);
 
 	/*
 	 * if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@ static inline int __init drbg_healthcheck_sanity(void)
 	 * grave bug.
 	 */
 
-	/* get a valid instance of DRBG for following tests */
-	ret = drbg_instantiate(drbg, NULL, coreref, pr);
-	if (ret) {
-		rc = ret;
-		goto outbuf;
-	}
 	max_addtllen = drbg_max_addtl(drbg);
 	max_request_bytes = drbg_max_request_bytes(drbg);
 	drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@ static inline int __init drbg_healthcheck_sanity(void)
 	/* overflow max_bits */
 	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
 	BUG_ON(0 < len);
-	drbg_uninstantiate(drbg);
 
 	/* overflow max addtllen with personalization string */
-	ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+	ret = drbg_seed(drbg, &addtl, false);
 	BUG_ON(0 == ret);
 	/* all tests passed */
 	rc = 0;
@@ -1952,9 +1953,7 @@ static inline int __init drbg_healthcheck_sanity(void)
 	pr_devel("DRBG: Sanity tests for failure code paths successfully "
 		 "completed\n");
 
-	drbg_uninstantiate(drbg);
-outbuf:
-	kzfree(drbg);
+	kfree(drbg);
 	return rc;
 }
@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
 {
 	unsigned int i = 0; /* pointer to drbg_algs */
 	unsigned int j = 0; /* pointer to drbg_cores */
-	int ret = -EFAULT;
+	int ret;
 
 	ret = drbg_healthcheck_sanity();
 	if (ret)
@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
 		pr_info("DRBG: Cannot register all DRBG types"
 			"(slots needed: %zu, slots available: %zu)\n",
 			ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
-		return ret;
+		return -EFAULT;
 	}
 
 	/*
......
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	struct crypto_skcipher *ctr = ctx->ctr;
 	struct {
 		be128 hash;
-		u8 iv[8];
+		u8 iv[16];
 
 		struct crypto_gcm_setkey_result result;
......
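(Context for the one-line fix above: the iv member is handed to the CTR skcipher as its counter block while the hash subkey is derived, and ctr(aes) takes a full 16-byte IV. With only 8 bytes on the stack, crypto_skcipher_encrypt() accessed past the field into the adjacent completion structure, which is the corruption this change fixes.)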
@@ -14,24 +14,13 @@
 
 #include <crypto/algapi.h>
 #include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
 #include <crypto/internal/hash.h>
 #include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-#define GHASH_BLOCK_SIZE	16
-#define GHASH_DIGEST_SIZE	16
-
-struct ghash_ctx {
-	struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
-	u8 buffer[GHASH_BLOCK_SIZE];
-	u32 bytes;
-};
-
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
......
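A sketch of the new shared header (include/crypto/ghash.h), assuming it carries exactly the definitions removed above so that ghash-generic and the vmx p8_ghash driver agree on the descriptor layout:

#ifndef __CRYPTO_GHASH_H__
#define __CRYPTO_GHASH_H__

#include <linux/types.h>
#include <crypto/gf128mul.h>

#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16

struct ghash_ctx {
	struct gf128mul_4k *gf128;
};

struct ghash_desc_ctx {
	u8 buffer[GHASH_BLOCK_SIZE];
	u32 bytes;
};

#endif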
@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
 
 int ahash_mcryptd_digest(struct ahash_request *desc)
 {
-	int err;
-
-	err = crypto_ahash_init(desc) ?:
-	      ahash_mcryptd_finup(desc);
-
-	return err;
+	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
 }
 
 int ahash_mcryptd_update(struct ahash_request *desc)
......
@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
 		n_sz--;
 	}
 
-	/* In FIPS mode only allow key size 2K & 3K */
-	if (n_sz != 256 && n_sz != 384) {
+	/* In FIPS mode only allow key size 2K and higher */
+	if (n_sz < 256) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		return -EINVAL;
 	}
......
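(For reference: n_sz is the modulus length in bytes, so the threshold works out as 256 × 8 = 2048 bits. The old test accepted exactly 256- or 384-byte moduli, i.e. 2048- or 3072-bit keys; the new test accepts any modulus of at least 2048 bits, matching the "allow keys >= 2048 bits in FIPS mode" item in the pull request summary.)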
@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
 	char *state;
 	struct ahash_request *req;
 	int statesize, ret = -EINVAL;
+	const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
 
 	req = *preq;
 	statesize = crypto_ahash_statesize(
 				crypto_ahash_reqtfm(req));
-	state = kmalloc(statesize, GFP_KERNEL);
+	state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
 	if (!state) {
 		pr_err("alt: hash: Failed to alloc state for %s\n", algo);
 		goto out_nostate;
 	}
+	memcpy(state + statesize, guard, sizeof(guard));
 	ret = crypto_ahash_export(req, state);
+	WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
 	if (ret) {
 		pr_err("alt: hash: Failed to export() for %s\n", algo);
 		goto out;
@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 		memcpy(key, template[i].key, template[i].klen);
 
 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 		memcpy(key, template[i].key, template[i].klen);
 
 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
 		if (template[i].np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		j++;
 		ret = -EINVAL;
@@ -1023,7 +1029,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
 
 		ret = crypto_cipher_setkey(tfm, template[i].key,
 					   template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			printk(KERN_ERR "alg: cipher: setkey failed "
 			       "on test %d for %s: flags=%x\n", j,
 			       algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 		if (template[i].np && !template[i].also_non_np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 
 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 		if (!template[i].np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 
 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
......
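The guard bytes added to ahash_partial_update() above are a generic overrun canary. A hedged, standalone illustration of the pattern (all names here are illustrative, not part of testmgr):

static int call_with_overrun_canary(int (*fill)(void *buf), size_t len)
{
	static const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
	char *buf = kmalloc(len + sizeof(guard), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	memcpy(buf + len, guard, sizeof(guard));	/* arm the canary */
	ret = fill(buf);				/* callee may overrun */
	WARN_ON(memcmp(buf + len, guard, sizeof(guard)));

	kfree(buf);
	return ret;
}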
@@ -59,6 +59,7 @@ struct hash_testvec {
  * @tap:	How to distribute data in @np SGs
  * @also_non_np:	If set to 1, the test will be also done without
  *			splitting data in @np SGs
+ * @fips_skip:	Skip the test vector in FIPS mode
  */
 
 struct cipher_testvec {
@@ -75,6 +76,7 @@ struct cipher_testvec {
 	unsigned char klen;
 	unsigned short ilen;
 	unsigned short rlen;
+	bool fips_skip;
 };
 
 struct aead_testvec {
@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 32,
+		.fips_skip = 1,
 		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 32,
+		.fips_skip = 1,
 		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input	= "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
@@ -24,6 +24,10 @@
 #include <linux/preempt.h>
 #include <asm/xor.h>
 
+#ifndef XOR_SELECT_TEMPLATE
+#define XOR_SELECT_TEMPLATE(x) (x)
+#endif
+
 /* The xor routines to use.  */
 static struct xor_block_template *active_template;
@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
 	void *b1, *b2;
 	struct xor_block_template *f, *fastest;
 
+	fastest = XOR_SELECT_TEMPLATE(NULL);
+
+	if (fastest) {
+		printk(KERN_INFO "xor: automatically using best "
+				 "checksumming function   %-10s\n",
+		       fastest->name);
+		goto out;
+	}
+
 	/*
 	 * Note: Since the memory is not actually used for _anything_ but to
 	 * test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@ calibrate_xor_blocks(void)
 	 * all the possible functions, just test the best one
 	 */
 
-	fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
-	fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
 #define xor_speed(templ)	do_xor_speed((templ), b1, b2)
 
-	if (fastest) {
-		printk(KERN_INFO "xor: automatically using best "
-				 "checksumming function:\n");
-		xor_speed(fastest);
-		goto out;
-	} else {
-		printk(KERN_INFO "xor: measuring software checksum speed\n");
-		XOR_TRY_TEMPLATES;
-		fastest = template_list;
-		for (f = fastest; f; f = f->next)
-			if (f->speed > fastest->speed)
-				fastest = f;
-	}
+	printk(KERN_INFO "xor: measuring software checksum speed\n");
+	XOR_TRY_TEMPLATES;
+	fastest = template_list;
+	for (f = fastest; f; f = f->next)
+		if (f->speed > fastest->speed)
+			fastest = f;
 
 	printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
 	       fastest->name, fastest->speed / 1000, fastest->speed % 1000);
 
 #undef xor_speed
 
-out:
 	free_pages((unsigned long)b1, 2);
+out:
 	active_template = fastest;
 	return 0;
 }
......
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
- * Based om ecb.c
+ * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
......
@@ -410,6 +410,19 @@ config HW_RANDOM_MESON
 
 	  If unsure, say Y.
 
+config HW_RANDOM_CAVIUM
+	tristate "Cavium ThunderX Random Number Generator support"
+	depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Cavium SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cavium_rng.
+
+	  If unsure, say Y.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
......
@@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
 obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
@@ -24,16 +24,18 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <asm/io.h>
 
-#define PFX	KBUILD_MODNAME ": "
+#define DRV_NAME "AMD768-HWRNG"
+
+#define RNGDATA		0x00
+#define RNGDONE		0x04
+#define PMBASE_OFFSET	0xF0
+#define PMBASE_SIZE	8
 
 /*
  * Data for PCI driver interface
@@ -50,72 +52,84 @@ static const struct pci_device_id pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);
 
-static struct pci_dev *amd_pdev;
-
-static int amd_rng_data_present(struct hwrng *rng, int wait)
-{
-	u32 pmbase = (u32)rng->priv;
-	int data, i;
-
-	for (i = 0; i < 20; i++) {
-		data = !!(inl(pmbase + 0xF4) & 1);
-		if (data || !wait)
-			break;
-		udelay(10);
-	}
-	return data;
-}
-
-static int amd_rng_data_read(struct hwrng *rng, u32 *data)
-{
-	u32 pmbase = (u32)rng->priv;
-
-	*data = inl(pmbase + 0xF0);
-
-	return 4;
+struct amd768_priv {
+	void __iomem *iobase;
+	struct pci_dev *pcidev;
+};
+
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	u32 *data = buf;
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	size_t read = 0;
+	/* We will wait at maximum one time per read */
+	int timeout = max / 4 + 1;
+
+	/*
+	 * RNG data is available when RNGDONE is set to 1
+	 * New random numbers are generated approximately 128 microseconds
+	 * after RNGDATA is read
+	 */
+	while (read < max) {
+		if (ioread32(priv->iobase + RNGDONE) == 0) {
+			if (wait) {
+				/* Delay given by datasheet */
+				usleep_range(128, 196);
+				if (timeout-- == 0)
+					return read;
+			} else {
+				return 0;
+			}
+		} else {
+			*data = ioread32(priv->iobase + RNGDATA);
+			data++;
+			read += 4;
+		}
+	}
+
+	return read;
 }
 
 static int amd_rng_init(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;
 
-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen |= (1 << 7);	/* RNG on */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen |= BIT(7);	/* RNG on */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
 
-	pci_read_config_byte(amd_pdev, 0x41, &rnen);
-	rnen |= (1 << 7);	/* PMIO enable */
-	pci_write_config_byte(amd_pdev, 0x41, rnen);
+	pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+	rnen |= BIT(7);	/* PMIO enable */
+	pci_write_config_byte(priv->pcidev, 0x41, rnen);
 
 	return 0;
 }
 
 static void amd_rng_cleanup(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;
 
-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen &= ~(1 << 7);	/* RNG off */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen &= ~BIT(7);	/* RNG off */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
 }
 
 static struct hwrng amd_rng = {
 	.name		= "amd",
 	.init		= amd_rng_init,
 	.cleanup	= amd_rng_cleanup,
-	.data_present	= amd_rng_data_present,
-	.data_read	= amd_rng_data_read,
+	.read		= amd_rng_read,
 };
 
 static int __init mod_init(void)
 {
 	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	u32 pmbase;
+	struct amd768_priv *priv;
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
@@ -123,42 +137,44 @@ static int __init mod_init(void)
 			goto found;
 	}
 	/* Device not found. */
-	goto out;
+	return -ENODEV;
 
 found:
 	err = pci_read_config_dword(pdev, 0x58, &pmbase);
 	if (err)
-		goto out;
-
-	err = -EIO;
+		return err;
 
 	pmbase &= 0x0000FF00;
 	if (pmbase == 0)
-		goto out;
+		return -EIO;
 
-	if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
-		dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
+				PMBASE_SIZE, DRV_NAME)) {
+		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
 			pmbase + 0xF0);
-		err = -EBUSY;
-		goto out;
+		return -EBUSY;
 	}
-	amd_rng.priv = (unsigned long)pmbase;
-	amd_pdev = pdev;
-
-	pr_info("AMD768 RNG detected\n");
-	err = hwrng_register(&amd_rng);
-	if (err) {
-		pr_err(PFX "RNG registering failed (%d)\n",
-		       err);
-		release_region(pmbase + 0xF0, 8);
-		goto out;
+
+	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
+			PMBASE_SIZE);
+	if (!priv->iobase) {
+		pr_err(DRV_NAME "Cannot map ioport\n");
+		return -ENOMEM;
 	}
-out:
-	return err;
+
+	amd_rng.priv = (unsigned long)priv;
+	priv->pcidev = pdev;
+
+	pr_info(DRV_NAME " detected\n");
+	return devm_hwrng_register(&pdev->dev, &amd_rng);
 }
 
 static void __exit mod_exit(void)
 {
-	u32 pmbase = (unsigned long)amd_rng.priv;
-
-	release_region(pmbase + 0xF0, 8);
-	hwrng_unregister(&amd_rng);
 }
 
 module_init(mod_init);
......
@@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 	bcm2835_rng_ops.priv = (unsigned long)rng_base;
 
 	rng_id = of_match_node(bcm2835_rng_of_match, np);
-	if (!rng_id)
+	if (!rng_id) {
+		iounmap(rng_base);
 		return -EINVAL;
+	}
 
 	/* Check for rng init function, execute it */
 	rng_setup = rng_id->data;
 	if (rng_setup)
......
/*
* Hardware Random Number Generator support for Cavium, Inc.
* Thunder processor family.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2016 Cavium, Inc.
*/
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
struct cavium_rng {
struct hwrng ops;
void __iomem *result;
};
/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
unsigned int size = max;
while (size >= 8) {
*((u64 *)dat) = readq(p->result);
size -= 8;
dat += 8;
}
while (size > 0) {
*((u8 *)dat) = readb(p->result);
size--;
dat++;
}
return max;
}
/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct cavium_rng *rng;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
/* Map the RNG result */
rng->result = pcim_iomap(pdev, 0, 0);
if (!rng->result) {
dev_err(&pdev->dev, "Error iomap failed retrieving result.\n");
return -ENOMEM;
}
rng->ops.name = "cavium rng";
rng->ops.read = cavium_rng_read;
rng->ops.quality = 1000;
pci_set_drvdata(pdev, rng);
ret = hwrng_register(&rng->ops);
if (ret) {
dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
return ret;
}
return 0;
}
/* Remove the VF */
void cavium_rng_remove_vf(struct pci_dev *pdev)
{
struct cavium_rng *rng;
rng = pci_get_drvdata(pdev);
hwrng_unregister(&rng->ops);
}
static const struct pci_device_id cavium_rng_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
{0,},
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
static struct pci_driver cavium_rng_vf_driver = {
.name = "cavium_rng_vf",
.id_table = cavium_rng_vf_id_table,
.probe = cavium_rng_probe_vf,
.remove = cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_LICENSE("GPL");
/*
* Hardware Random Number Generator support for Cavium Inc.
* Thunder processor family.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2016 Cavium, Inc.
*/
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#define THUNDERX_RNM_ENT_EN 0x1
#define THUNDERX_RNM_RNG_EN 0x2
struct cavium_rng_pf {
void __iomem *control_status;
};
/* Enable the RNG hardware and activate the VF */
static int cavium_rng_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct cavium_rng_pf *rng;
int iov_err;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
	/* Map the RNG control */
rng->control_status = pcim_iomap(pdev, 0, 0);
if (!rng->control_status) {
dev_err(&pdev->dev,
"Error iomap failed retrieving control_status.\n");
return -ENOMEM;
}
/* Enable the RNG hardware and entropy source */
writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
rng->control_status);
pci_set_drvdata(pdev, rng);
/* Enable the Cavium RNG as a VF */
iov_err = pci_enable_sriov(pdev, 1);
if (iov_err != 0) {
/* Disable the RNG hardware and entropy source */
writeq(0, rng->control_status);
dev_err(&pdev->dev,
"Error initializing RNG virtual function,(%i).\n",
iov_err);
return iov_err;
}
return 0;
}
/* Disable VF and RNG Hardware */
void cavium_rng_remove(struct pci_dev *pdev)
{
struct cavium_rng_pf *rng;
rng = pci_get_drvdata(pdev);
/* Remove the VF */
pci_disable_sriov(pdev);
/* Disable the RNG hardware and entropy source */
writeq(0, rng->control_status);
}
static const struct pci_device_id cavium_rng_pf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
{0,},
};
MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
static struct pci_driver cavium_rng_pf_driver = {
.name = "cavium_rng_pf",
.id_table = cavium_rng_pf_id_table,
.probe = cavium_rng_probe,
.remove = cavium_rng_remove,
};
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_LICENSE("GPL");
@@ -449,22 +449,6 @@ int hwrng_register(struct hwrng *rng)
 		goto out;
 
 	mutex_lock(&rng_mutex);
-
-	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
-	err = -ENOMEM;
-	if (!rng_buffer) {
-		rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
-		if (!rng_buffer)
-			goto out_unlock;
-	}
-	if (!rng_fillbuf) {
-		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
-		if (!rng_fillbuf) {
-			kfree(rng_buffer);
-			goto out_unlock;
-		}
-	}
-
 	/* Must not register two RNGs with the same name. */
 	err = -EEXIST;
 	list_for_each_entry(tmp, &rng_list, list) {
@@ -573,7 +557,26 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
 
 static int __init hwrng_modinit(void)
 {
-	return register_miscdev();
+	int ret = -ENOMEM;
+
+	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_buffer)
+		return -ENOMEM;
+
+	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_fillbuf) {
+		kfree(rng_buffer);
+		return -ENOMEM;
+	}
+
+	ret = register_miscdev();
+	if (ret) {
+		kfree(rng_fillbuf);
+		kfree(rng_buffer);
+	}
+
+	return ret;
 }
 
 static void __exit hwrng_modexit(void)
......
@@ -24,15 +24,12 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#define PFX	KBUILD_MODNAME ": "
 
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
@@ -85,7 +82,6 @@ static struct hwrng geode_rng = {
 
 static int __init mod_init(void)
 {
-	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -93,43 +89,27 @@ static int __init mod_init(void)
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent)
-			goto found;
+		if (ent) {
+			rng_base = pci_resource_start(pdev, 0);
+			if (rng_base == 0)
+				return -ENODEV;
+
+			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
+			if (!mem)
+				return -ENOMEM;
+			geode_rng.priv = (unsigned long)mem;
+
+			pr_info("AMD Geode RNG detected\n");
+			return devm_hwrng_register(&pdev->dev, &geode_rng);
+		}
 	}
-	/* Device not found. */
-	goto out;
-
-found:
-	rng_base = pci_resource_start(pdev, 0);
-	if (rng_base == 0)
-		goto out;
-	err = -ENOMEM;
-	mem = ioremap(rng_base, 0x58);
-	if (!mem)
-		goto out;
-	geode_rng.priv = (unsigned long)mem;
-
-	pr_info("AMD Geode RNG detected\n");
-	err = hwrng_register(&geode_rng);
-	if (err) {
-		pr_err(PFX "RNG registering failed (%d)\n",
-		       err);
-		goto err_unmap;
-	}
-out:
-	return err;
 
-err_unmap:
-	iounmap(mem);
-	goto out;
+	/* Device not found. */
+	return -ENODEV;
 }
 
 static void __exit mod_exit(void)
 {
-	void __iomem *mem = (void __iomem *)geode_rng.priv;
-
 	hwrng_unregister(&geode_rng);
-	iounmap(mem);
 }
 
 module_init(mod_init);
......
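
Both changes in this file lean on device-managed resources: devm_ioremap() and devm_hwrng_register() tie the mapping and the RNG registration to the device's lifetime, which is why the err_unmap label and the iounmap() in mod_exit() could be deleted. A condensed sketch of the pattern (my_rng_example and the probe shape are stand-ins, not the driver's code):

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/pci.h>

static struct hwrng my_rng_example;	/* .name/.read filled in elsewhere */

static int my_probe_example(struct pci_dev *pdev)
{
	void __iomem *mem;

	mem = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), 0x58);
	if (!mem)
		return -ENOMEM;

	my_rng_example.priv = (unsigned long)mem;
	/* unmapped and unregistered automatically when the device goes away */
	return devm_hwrng_register(&pdev->dev, &my_rng_example);
}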
@@ -76,9 +76,6 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
	struct meson_rng_data *data =
			container_of(rng, struct meson_rng_data, rng);

-	if (max < sizeof(u32))
-		return 0;
-
	*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);

	return sizeof(u32);
...
@@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret) {
+	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
		pm_runtime_put_noidle(&pdev->dev);
		goto err_ioremap;
@@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
	int ret;

	ret = pm_runtime_get_sync(dev);
-	if (ret) {
+	if (ret < 0) {
		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
		pm_runtime_put_noidle(dev);
		return ret;
...
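
pm_runtime_get_sync() returns a negative errno on failure but may legitimately return 1 when the device was already active, so the old `if (ret)` treated a successful resume as an error; only `ret < 0` is a failure. The corrected call site, as a sketch (runtime_get_example is hypothetical):

#include <linux/pm_runtime.h>

static int runtime_get_example(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* 0 or 1 on success */

	if (ret < 0) {
		/* the usage count was raised even on failure; drop it */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}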
@@ -71,12 +71,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
	return 0;
 }

-static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
-{
-	return 1;
-}
-
-static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
 {
	int r;

@@ -88,8 +83,7 @@ static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)

 static struct hwrng omap3_rom_rng_ops = {
	.name		= "omap3-rom",
-	.data_present	= omap3_rom_rng_data_present,
-	.data_read	= omap3_rom_rng_data_read,
+	.read		= omap3_rom_rng_read,
 };

 static int omap3_rom_rng_probe(struct platform_device *pdev)
...
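
This converts the driver from the legacy .data_present/.data_read pair to the single .read hook, which receives a destination buffer, a maximum byte count and a wait flag, and returns the number of bytes produced. A hypothetical skeleton of the modern callback (read_hw_word_example stands in for the device access; the core passes a buffer of at least 32 bytes, which is also why the per-driver minimum-size checks in the meson and st diffs could be dropped):

#include <linux/hw_random.h>
#include <linux/string.h>

static u32 read_hw_word_example(void);	/* hypothetical device access */

static int my_read_example(struct hwrng *rng, void *data, size_t max,
			   bool wait)
{
	u32 word = read_hw_word_example();

	memcpy(data, &word, sizeof(word));
	return sizeof(word);
}

static struct hwrng my_rng_example = {
	.name = "example",
	.read = my_read_example,
};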
@@ -95,42 +95,20 @@ static struct hwrng pasemi_rng = {
	.data_read	= pasemi_rng_data_read,
 };

-static int rng_probe(struct platform_device *ofdev)
+static int rng_probe(struct platform_device *pdev)
 {
	void __iomem *rng_regs;
-	struct device_node *rng_np = ofdev->dev.of_node;
-	struct resource res;
-	int err = 0;
-
-	err = of_address_to_resource(rng_np, 0, &res);
-	if (err)
-		return -ENODEV;
+	struct resource *res;

-	rng_regs = ioremap(res.start, 0x100);
-	if (!rng_regs)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng_regs))
+		return PTR_ERR(rng_regs);

	pasemi_rng.priv = (unsigned long)rng_regs;

	pr_info("Registering PA Semi RNG\n");
-
-	err = hwrng_register(&pasemi_rng);
-	if (err)
-		iounmap(rng_regs);
-
-	return err;
-}
-
-static int rng_remove(struct platform_device *dev)
-{
-	void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
-
-	hwrng_unregister(&pasemi_rng);
-	iounmap(rng_regs);
-
-	return 0;
+	return devm_hwrng_register(&pdev->dev, &pasemi_rng);
 }

 static const struct of_device_id rng_match[] = {
@@ -146,7 +124,6 @@ static struct platform_driver rng_driver = {
		.of_match_table = rng_match,
	},
	.probe		= rng_probe,
-	.remove		= rng_remove,
 };

 module_platform_driver(rng_driver);
...
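
devm_ioremap_resource() folds the of_address_to_resource()/ioremap()/error-unwind dance into one call: it validates the resource (including a NULL check), requests the region, maps it, and returns an ERR_PTR() code on failure. The probe shape it enables, as a sketch:

#include <linux/io.h>
#include <linux/platform_device.h>

static int probe_example(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is valid here and released automatically on detach */
	return 0;
}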
@@ -143,7 +143,6 @@ static struct platform_driver pic32_rng_driver = {
	.remove		= pic32_rng_remove,
	.driver		= {
		.name	= "pic32-rng",
-		.owner	= THIS_MODULE,
		.of_match_table	= of_match_ptr(pic32_rng_of_match),
	},
 };
...
@@ -54,9 +54,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
	u32 status;
	int i;

-	if (max < sizeof(u16))
-		return -EINVAL;
-
	/* Wait until FIFO is full - max 4uS*/
	for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
		status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
@@ -111,6 +108,7 @@ static int st_rng_probe(struct platform_device *pdev)
	ret = hwrng_register(&ddata->ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register HW RNG\n");
+		clk_disable_unprepare(clk);
		return ret;
	}
...
@@ -144,22 +144,13 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
	}

	platform_set_drvdata(dev, rngdev);
-	return hwrng_register(&rngdev->rng);
-}
-
-static int __exit tx4939_rng_remove(struct platform_device *dev)
-{
-	struct tx4939_rng *rngdev = platform_get_drvdata(dev);
-
-	hwrng_unregister(&rngdev->rng);
-	return 0;
+	return devm_hwrng_register(&dev->dev, &rngdev->rng);
 }

 static struct platform_driver tx4939_rng_driver = {
	.driver		= {
		.name = "tx4939-rng",
	},
-	.remove = tx4939_rng_remove,
 };

 module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
...
@@ -318,6 +318,9 @@ config CRYPTO_DEV_OMAP_AES
	select CRYPTO_AES
	select CRYPTO_BLKCIPHER
	select CRYPTO_ENGINE
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_CTR
	help
	  OMAP processors have AES module accelerator. Select this if you
	  want to use the OMAP module for AES algorithms.
...
@@ -111,6 +111,42 @@
 #else
 #define debug(format, arg...)
 #endif
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+static void dbg_dump_sg(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			struct scatterlist *sg, size_t tlen, bool ascii,
+			bool may_sleep)
+{
+	struct scatterlist *it;
+	void *it_page;
+	size_t len;
+	void *buf;
+
+	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
+		/*
+		 * make sure the scatterlist's page
+		 * has a valid virtual memory mapping
+		 */
+		it_page = kmap_atomic(sg_page(it));
+		if (unlikely(!it_page)) {
+			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+			return;
+		}
+
+		buf = it_page + it->offset;
+		len = min(tlen, it->length);
+		print_hex_dump(level, prefix_str, prefix_type, rowsize,
+			       groupsize, buf, len, ascii);
+		tlen -= len;
+
+		kunmap_atomic(it_page);
+	}
+}
+#endif

 static struct list_head alg_list;

 struct caam_alg_entry {
@@ -227,7 +263,8 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
@@ -500,8 +537,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -578,8 +614,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -683,8 +718,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -1478,7 +1512,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
-	u32 *nonce;
+	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
@@ -1531,8 +1565,9 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
@@ -1549,8 +1584,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -1590,8 +1624,9 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
@@ -1608,8 +1643,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -1653,8 +1687,9 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
@@ -1685,8 +1720,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
-		append_load_imm_u32(desc, (u32)1, LDST_IMM |
-				    LDST_CLASS_1_CCB |
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));
@@ -1995,9 +2029,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif

	ablkcipher_unmap(jrdev, edesc, req);
@@ -2027,9 +2061,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif

	ablkcipher_unmap(jrdev, edesc, req);
@@ -2184,12 +2218,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
	int len, sec4_sg_index = 0;

 #ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+			  CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->nbytes, 1);
+	printk(KERN_ERR "asked=%d, nbytes=%d\n",
+	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
+	dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
 #endif

	len = desc_len(sh_desc);
@@ -2241,12 +2278,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
	int len, sec4_sg_index = 0;

 #ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+			  CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
 #endif

	len = desc_len(sh_desc);
@@ -2516,18 +2555,20 @@ static int aead_decrypt(struct aead_request *req)
	u32 *desc;
	int ret = 0;

+#ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+			  CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    req->assoclen + req->cryptlen, 1, may_sleep);
+#endif
+
	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->assoclen + req->cryptlen, 1);
-#endif
-
	/* Create and submit job descriptor*/
	init_authenc_job(req, edesc, all_contig, false);

 #ifdef DEBUG
...
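
The new dbg_dump_sg() exists because sg_virt() is only valid for pages that already have a kernel mapping; dumping an arbitrary scatterlist means mapping each entry's page first, and the converted call sites now dump req->dst rather than reusing req->src. The kernel of the idea as a standalone sketch (dump_sg_example is hypothetical, not the driver's function):

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void dump_sg_example(struct scatterlist *sg, size_t tlen)
{
	struct scatterlist *it;

	for (it = sg; it && tlen > 0; it = sg_next(it)) {
		/* map the entry's page so it has a virtual address */
		void *vaddr = kmap_atomic(sg_page(it));
		size_t len = min_t(size_t, tlen, it->length);

		print_hex_dump(KERN_DEBUG, "sg: ", DUMP_PREFIX_OFFSET,
			       16, 4, vaddr + it->offset, len, false);
		tlen -= len;
		kunmap_atomic(vaddr);	/* no sleeping between map/unmap */
	}
}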
@@ -14,6 +14,7 @@
 #include "jr.h"
 #include "desc_constr.h"
 #include "error.h"
+#include "ctrl.h"

 bool caam_little_end;
 EXPORT_SYMBOL(caam_little_end);
@@ -826,6 +827,8 @@ static int caam_probe(struct platform_device *pdev)
 caam_remove:
	caam_remove(pdev);
+	return ret;
+
 iounmap_ctrl:
	iounmap(ctrl);
 disable_caam_emi_slow:
...
@@ -23,13 +23,7 @@
 #define SEC4_SG_OFFSET_MASK	0x00001fff

 struct sec4_sg_entry {
-#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
-	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
-	u32 rsvd1;
-	dma_addr_t ptr;
-#else
	u64 ptr;
-#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
	u32 len;
	u32 bpid_offset;
 };
...
@@ -324,6 +324,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
 }
 APPEND_CMD_RAW_IMM(load, LOAD, u32);

+/*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+						 u##size immediate, \
+						 u32 options) \
+{ \
+	__##ee##size data = cpu_to_##ee##size(immediate); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+	append_data(desc, &data, sizeof(data)); \
+}
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
 /*
  * Append math command. Only the last part of destination and source need to
  * be specified
...
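
The new helper makes the byte order of inline immediates explicit: the value is converted with cpu_to_be32() inside the helper instead of relying on ad hoc `be32_to_cpu(1)` at call sites, and the companion append_load_as_imm() used for the RFC3686 nonce loads a byte array of stated size rather than dereferencing it as a u32. Roughly what APPEND_CMD_RAW_IMM2(load, LOAD, be, 32) expands to (PRINT_POS is CAAM's descriptor-debug macro):

static inline void append_load_imm_be32(u32 *desc, u32 immediate,
					u32 options)
{
	__be32 data = cpu_to_be32(immediate);	/* fixed byte order */

	PRINT_POS;
	append_cmd(desc, CMD_LOAD | IMMEDIATE | options | sizeof(data));
	append_data(desc, &data, sizeof(data));
}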
@@ -41,7 +41,6 @@ struct caam_drv_private_jr {
	struct device		*dev;
	int ridx;
	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
-	struct tasklet_struct irqtask;
	int irq;			/* One per queue */

	/* Number of scatterlist crypt transforms active on the JobR */
...
@@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev)

	ret = caam_reset_hw_jr(dev);

-	tasklet_kill(&jrp->irqtask);
-
	/* Release interrupt */
	free_irq(jrp->irq, dev);

@@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)

	/*
	 * Check the output ring for ready responses, kick
-	 * tasklet if jobs done.
+	 * the threaded irq if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
@@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

-	preempt_disable();
-	tasklet_schedule(&jrp->irqtask);
-	preempt_enable();
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }

-/* Deferred service handler, run as interrupt-fired tasklet */
-static void caam_jr_dequeue(unsigned long devarg)
+static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
 {
	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = (struct device *)devarg;
+	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
@@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg)

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+	return IRQ_HANDLED;
 }

 /**
@@ -394,10 +389,9 @@ static int caam_jr_init(struct device *dev)

	jrp = dev_get_drvdata(dev);

-	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
	/* Connect job ring interrupt handler. */
-	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-			    dev_name(dev), dev);
+	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+				     caam_jr_threadirq, IRQF_SHARED,
+				     dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
@@ -460,7 +454,6 @@ static int caam_jr_init(struct device *dev)
 out_free_irq:
	free_irq(jrp->irq, dev);
 out_kill_deq:
-	tasklet_kill(&jrp->irqtask);
	return error;
 }

@@ -513,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev)
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		irq_dispose_mapping(jrpriv->irq);
+		iounmap(ctrl);
		return error;
	}
...
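
The conversion replaces the tasklet bottom half with a threaded interrupt: the hard handler only checks and acknowledges the hardware and returns IRQ_WAKE_THREAD, and the kernel then runs the thread function in process context, where it may sleep. The split in miniature (the *_example helpers are hypothetical):

#include <linux/interrupt.h>

static bool ack_hw_example(void *dev_id);		/* hypothetical */
static void process_done_jobs_example(void *dev_id);	/* hypothetical */

static irqreturn_t hardirq_example(int irq, void *dev_id)
{
	if (!ack_hw_example(dev_id))
		return IRQ_NONE;	/* shared line, not our interrupt */
	return IRQ_WAKE_THREAD;		/* defer the real work */
}

static irqreturn_t threadfn_example(int irq, void *dev_id)
{
	process_done_jobs_example(dev_id);	/* process context, may sleep */
	return IRQ_HANDLED;
}

/* wiring it up:
 *	err = request_threaded_irq(irq, hardirq_example, threadfn_example,
 *				   IRQF_SHARED, "example", dev);
 */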
@@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg)
 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
 #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */

+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#define cpu_to_caam_dma64(value) \
+		(((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+		 (u64)cpu_to_caam32(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+#endif
+
 /*
  * jr_outentry
  * Represents each entry in a JobR output ring
...
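
On i.MX the CAAM is big-endian while the CPU is little-endian and dma_addr_t is 32-bit, so a 64-bit SGT pointer is assembled from two byte-swapped 32-bit halves with the lower half placed in the high word. A worked example of the arithmetic above, assuming cpu_to_caam32 == cpu_to_be32 on that platform:

/*
 *   dma                    = 0x0000000412345678
 *   lower_32_bits(dma)     = 0x12345678 -> cpu_to_caam32 -> 0x78563412
 *   upper_32_bits(dma)     = 0x00000004 -> cpu_to_caam32 -> 0x04000000
 *   cpu_to_caam_dma64(dma) = 0x7856341204000000
 *
 * A little-endian CPU stores that u64 as the bytes
 *   00 00 00 04 12 34 56 78
 * which is exactly the big-endian 64-bit pointer the CAAM expects.
 */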
@@ -15,7 +15,7 @@ struct sec4_sg_entry;
 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
				      dma_addr_t dma, u32 len, u16 offset)
 {
-	sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+	sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
	sec4_sg_ptr->len = cpu_to_caam32(len);
	sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
 #ifdef DEBUG
...
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o \
	    ccp-ops.o \
	    ccp-dev-v3.o \
+	    ccp-dev-v5.o \
	    ccp-platform.o \
	    ccp-dmaengine.o

 ccp-$(CONFIG_PCI) += ccp-pci.o
...
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
-	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+
+	switch (rctx->type) {
+	case CCP_SHA_TYPE_1:
+		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		/* Should never get here */
+		break;
+	}
+
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
...
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
#include "ccp-dev.h" #include "ccp-dev.h"
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
int start;
struct ccp_device *ccp = cmd_q->ccp;
for (;;) {
mutex_lock(&ccp->sb_mutex);
start = (u32)bitmap_find_next_zero_area(ccp->sb,
ccp->sb_count,
ccp->sb_start,
count, 0);
if (start <= ccp->sb_count) {
bitmap_set(ccp->sb, start, count);
mutex_unlock(&ccp->sb_mutex);
break;
}
ccp->sb_avail = 0;
mutex_unlock(&ccp->sb_mutex);
/* Wait for KSB entries to become available */
if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
return 0;
}
return KSB_START + start;
}
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
struct ccp_device *ccp = cmd_q->ccp;
if (!start)
return;
mutex_lock(&ccp->sb_mutex);
bitmap_clear(ccp->sb, start - KSB_START, count);
ccp->sb_avail = 1;
mutex_unlock(&ccp->sb_mutex);
wake_up_interruptible_all(&ccp->sb_queue);
}
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{ {
struct ccp_cmd_queue *cmd_q = op->cmd_q; struct ccp_cmd_queue *cmd_q = op->cmd_q;
...@@ -68,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) ...@@ -68,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
/* On error delete all related jobs from the queue */ /* On error delete all related jobs from the queue */
cmd = (cmd_q->id << DEL_Q_ID_SHIFT) cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
| op->jobid; | op->jobid;
if (cmd_q->cmd_error)
ccp_log_error(cmd_q->ccp,
cmd_q->cmd_error);
iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
@@ -99,10 +158,10 @@ static int ccp_perform_aes(struct ccp_op *op)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -129,10 +188,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op)
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -158,7 +217,7 @@ static int ccp_perform_sha(struct ccp_op *op)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
@@ -181,11 +240,11 @@ static int ccp_perform_rsa(struct ccp_op *op)
	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -215,10 +274,10 @@ static int ccp_perform_passthru(struct ccp_op *op)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
-			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
-		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
-		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+		cr[2] = op->src.u.sb * CCP_SB_BYTES;
+		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
@@ -226,8 +285,8 @@ static int ccp_perform_passthru(struct ccp_op *op)
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
-		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
-		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
+		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
@@ -256,35 +315,6 @@ static int ccp_perform_ecc(struct ccp_op *op)
	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }

-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
-	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
-	u32 trng_value;
-	int len = min_t(int, sizeof(trng_value), max);
-
-	/*
-	 * Locking is provided by the caller so we can update device
-	 * hwrng-related fields safely
-	 */
-	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
-	if (!trng_value) {
-		/* Zero is returned if not data is available or if a
-		 * bad-entropy error is present. Assume an error if
-		 * we exceed TRNG_RETRIES reads of zero.
-		 */
-		if (ccp->hwrng_retries++ > TRNG_RETRIES)
-			return -EIO;
-
-		return 0;
-	}
-
-	/* Reset the counter and save the rng value */
-	ccp->hwrng_retries = 0;
-	memcpy(data, &trng_value, len);
-
-	return len;
-}
-
 static int ccp_init(struct ccp_device *ccp)
 {
	struct device *dev = ccp->dev;
@@ -321,9 +351,9 @@ static int ccp_init(struct ccp_device *ccp)
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
-		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
-		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
-		ccp->ksb_count -= 2;
+		cmd_q->sb_key = KSB_START + ccp->sb_start++;
+		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
+		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
@@ -335,7 +365,7 @@ static int ccp_init(struct ccp_device *ccp)
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

-		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);
@@ -375,9 +405,10 @@ static int ccp_init(struct ccp_device *ccp)
	}

	/* Initialize the queues used to wait for KSB space and suspend */
-	init_waitqueue_head(&ccp->ksb_queue);
+	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

+	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;
@@ -397,29 +428,26 @@ static int ccp_init(struct ccp_device *ccp)
		wake_up_process(kthread);
	}

-	/* Register the RNG */
-	ccp->hwrng.name = ccp->rngname;
-	ccp->hwrng.read = ccp_trng_read;
-	ret = hwrng_register(&ccp->hwrng);
-	if (ret) {
-		dev_err(dev, "error registering hwrng (%d)\n", ret);
+	dev_dbg(dev, "Enabling interrupts...\n");
+	/* Enable interrupts */
+	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+	dev_dbg(dev, "Registering device...\n");
+	ccp_add_device(ccp);
+
+	ret = ccp_register_rng(ccp);
+	if (ret)
		goto e_kthread;
-	}

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

-	ccp_add_device(ccp);
-
-	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
	return 0;

 e_hwrng:
-	hwrng_unregister(&ccp->hwrng);
+	ccp_unregister_rng(ccp);

 e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -441,19 +469,14 @@ static void ccp_destroy(struct ccp_device *ccp)
	struct ccp_cmd *cmd;
	unsigned int qim, i;

-	/* Remove this device from the list of available units first */
-	ccp_del_device(ccp);
-
	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
-	hwrng_unregister(&ccp->hwrng);
+	ccp_unregister_rng(ccp);

-	/* Stop the queue kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		if (ccp->cmd_q[i].kthread)
-			kthread_stop(ccp->cmd_q[i].kthread);
+	/* Remove this device from the list of available units */
+	ccp_del_device(ccp);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
@@ -472,6 +495,11 @@ static void ccp_destroy(struct ccp_device *ccp)
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

+	/* Stop the queue kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -527,18 +555,24 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
 }

 static const struct ccp_actions ccp3_actions = {
-	.perform_aes = ccp_perform_aes,
-	.perform_xts_aes = ccp_perform_xts_aes,
-	.perform_sha = ccp_perform_sha,
-	.perform_rsa = ccp_perform_rsa,
-	.perform_passthru = ccp_perform_passthru,
-	.perform_ecc = ccp_perform_ecc,
+	.aes = ccp_perform_aes,
+	.xts_aes = ccp_perform_xts_aes,
+	.sha = ccp_perform_sha,
+	.rsa = ccp_perform_rsa,
+	.passthru = ccp_perform_passthru,
+	.ecc = ccp_perform_ecc,
+	.sballoc = ccp_alloc_ksb,
+	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
+	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
 };

-struct ccp_vdata ccpv3 = {
+const struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
	.perform = &ccp3_actions,
+	.bar = 2,
+	.offset = 0x20000,
 };
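
ccp_alloc_ksb()/ccp_free_ksb() above implement a classic allocate-or-sleep scheme: find a free run in a bitmap under a mutex, and if nothing fits, sleep on a wait queue until a free operation sets the availability flag and wakes the allocator. The shape of it, with hypothetical globals standing in for the ccp fields:

#include <linux/bitmap.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#define NSLOTS_EXAMPLE 64

static DECLARE_BITMAP(slots_example, NSLOTS_EXAMPLE);
static DEFINE_MUTEX(lock_example);
static DECLARE_WAIT_QUEUE_HEAD(waitq_example);
static int avail_example;

static int alloc_run_example(unsigned int count)
{
	unsigned long start;

	for (;;) {
		mutex_lock(&lock_example);
		start = bitmap_find_next_zero_area(slots_example,
						   NSLOTS_EXAMPLE, 0,
						   count, 0);
		if (start < NSLOTS_EXAMPLE) {
			bitmap_set(slots_example, start, count);
			mutex_unlock(&lock_example);
			return start;
		}
		avail_example = 0;	/* nothing free; prepare to sleep */
		mutex_unlock(&lock_example);

		if (wait_event_interruptible(waitq_example, avail_example))
			return -ERESTARTSYS;
	}
}

static void free_run_example(unsigned int start, unsigned int count)
{
	mutex_lock(&lock_example);
	bitmap_clear(slots_example, start, count);
	avail_example = 1;
	mutex_unlock(&lock_example);
	wake_up_interruptible_all(&waitq_example);
}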
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -39,6 +40,59 @@ struct ccp_tasklet_data {
	struct ccp_cmd *cmd;
 };
+/* Human-readable error strings */
+char *ccp_error_codes[] = {
+	"",
+	"ERR 01: ILLEGAL_ENGINE",
+	"ERR 02: ILLEGAL_KEY_ID",
+	"ERR 03: ILLEGAL_FUNCTION_TYPE",
+	"ERR 04: ILLEGAL_FUNCTION_MODE",
+	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+	"ERR 06: ILLEGAL_FUNCTION_SIZE",
+	"ERR 07: Zlib_MISSING_INIT_EOM",
+	"ERR 08: ILLEGAL_FUNCTION_RSVD",
+	"ERR 09: ILLEGAL_BUFFER_LENGTH",
+	"ERR 10: VLSB_FAULT",
+	"ERR 11: ILLEGAL_MEM_ADDR",
+	"ERR 12: ILLEGAL_MEM_SEL",
+	"ERR 13: ILLEGAL_CONTEXT_ID",
+	"ERR 14: ILLEGAL_KEY_ADDR",
+	"ERR 15: 0xF Reserved",
+	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+	"ERR 18: CMD_TIMEOUT",
+	"ERR 19: IDMA0_AXI_SLVERR",
+	"ERR 20: IDMA0_AXI_DECERR",
+	"ERR 21: 0x15 Reserved",
+	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AIXI_DECERR",
+	"ERR 24: 0x18 Reserved",
+	"ERR 25: ZLIBVHB_AXI_SLVERR",
+	"ERR 26: ZLIBVHB_AXI_DECERR",
+	"ERR 27: 0x1B Reserved",
+	"ERR 28: ZLIB_UNEXPECTED_EOM",
+	"ERR 29: ZLIB_EXTRA_DATA",
+	"ERR 30: ZLIB_BTYPE",
+	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
+	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
+	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
+	"ERR 36: ZLIB_LIMIT_REACHED",
+	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+	"ERR 38: ODMA0_AXI_SLVERR",
+	"ERR 39: ODMA0_AXI_DECERR",
+	"ERR 40: 0x28 Reserved",
+	"ERR 41: ODMA1_AXI_SLVERR",
+	"ERR 42: ODMA1_AXI_DECERR",
+	"ERR 43: LSB_PARITY_ERR",
+};
+
+void ccp_log_error(struct ccp_device *d, int e)
+{
+	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+}
+
 /* List of CCPs, CCP count, read-write access lock, and access functions
  *
  * Lock structure: get ccp_unit_lock for reading whenever we need to
@@ -58,7 +112,7 @@ static struct ccp_device *ccp_rr;
 /* Ever-increasing value to produce unique unit numbers */
 static atomic_t ccp_unit_ordinal;

-unsigned int ccp_increment_unit_ordinal(void)
+static unsigned int ccp_increment_unit_ordinal(void)
 {
	return atomic_inc_return(&ccp_unit_ordinal);
 }
@@ -118,6 +172,29 @@ void ccp_del_device(struct ccp_device *ccp)
	write_unlock_irqrestore(&ccp_unit_lock, flags);
 }
+int ccp_register_rng(struct ccp_device *ccp)
+{
+	int ret = 0;
+
+	dev_dbg(ccp->dev, "Registering RNG...\n");
+	/* Register an RNG */
+	ccp->hwrng.name = ccp->rngname;
+	ccp->hwrng.read = ccp_trng_read;
+
+	ret = hwrng_register(&ccp->hwrng);
+	if (ret)
+		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
+
+	return ret;
+}
+
+void ccp_unregister_rng(struct ccp_device *ccp)
+{
+	if (ccp->hwrng.name)
+		hwrng_unregister(&ccp->hwrng);
+}
+
 static struct ccp_device *ccp_get_device(void)
 {
	unsigned long flags;
@@ -397,9 +474,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
-	mutex_init(&ccp->ksb_mutex);
-	ccp->ksb_count = KSB_COUNT;
-	ccp->ksb_start = 0;
+	mutex_init(&ccp->sb_mutex);
+	ccp->sb_count = KSB_COUNT;
+	ccp->sb_start = 0;

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
@@ -408,6 +485,34 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
	return ccp;
 }

+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+	u32 trng_value;
+	int len = min_t(int, sizeof(trng_value), max);
+
+	/* Locking is provided by the caller so we can update device
+	 * hwrng-related fields safely
+	 */
+	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+	if (!trng_value) {
+		/* Zero is returned if no data is available or if a
+		 * bad-entropy error is present. Assume an error if
+		 * we exceed TRNG_RETRIES reads of zero.
+		 */
+		if (ccp->hwrng_retries++ > TRNG_RETRIES)
+			return -EIO;
+
+		return 0;
+	}
+
+	/* Reset the counter and save the rng value */
+	ccp->hwrng_retries = 0;
+	memcpy(data, &trng_value, len);
+
+	return len;
+}
+
 #ifdef CONFIG_PM
 bool ccp_queues_suspended(struct ccp_device *ccp)
 {
...
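
ccp_log_error() indexes ccp_error_codes[] directly with the hardware status word; since the table only covers codes 0 through 43, a defensive variant would bounds-check before the lookup. A sketch (not the driver's code):

#include <linux/kernel.h>

static void log_error_checked_example(struct ccp_device *d, unsigned int e)
{
	if (e < ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error: %s (0x%x)\n",
			ccp_error_codes[e], e);
	else
		dev_err(d->dev, "CCP error: unknown (0x%x)\n", e);
}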
@@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
			if (!req)
				break;

+			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}
...