Commit 1b44c5a6 authored by Antoine Ténart, committed by Herbert Xu

crypto: inside-secure - add SafeXcel EIP197 crypto engine driver

Add support for Inside Secure SafeXcel EIP197 cryptographic engine,
which can be found on Marvell Armada 7k and 8k boards. This driver
currently implements: ecb(aes), cbc(aes), sha1, sha224, sha256 and
hmac(sha1) algorithms.

Two firmware images are needed for this engine to work. They are mostly used
for more advanced operations than the ones currently supported, but we still
need them to pass data to the internal cryptographic engine.
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c28e8f21
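
As context for the algorithm list in the commit message, here is a minimal sketch of how a kernel consumer would request one of the algorithms this driver registers, through the generic crypto API. The calls shown are the standard kernel crypto API; they are illustrative only and not part of this commit.

/* Illustrative only: requesting "cbc(aes)" through the crypto core.
 * With this driver loaded, the core may select the EIP197 backend if it
 * has the highest priority for that algorithm.
 */
#include <crypto/skcipher.h>
#include <linux/err.h>

static int example_request_cbc_aes(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set the key and submit skcipher requests here ... */

	crypto_free_skcipher(tfm);
	return 0;
}
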
@@ -655,4 +655,21 @@ config CRYPTO_DEV_BCM_SPU
source "drivers/crypto/stm32/Kconfig"

config CRYPTO_DEV_SAFEXCEL
	tristate "Inside Secure's SafeXcel cryptographic engine driver"
	depends on HAS_DMA && OF
	depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
	select CRYPTO_AES
	select CRYPTO_BLKCIPHER
	select CRYPTO_HASH
	select CRYPTO_HMAC
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	select CRYPTO_SHA512
	help
	  This driver interfaces with the SafeXcel EIP-197 cryptographic engine
	  designed by Inside Secure. Select this if you want to use CBC/ECB
	  chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
	  algorithms.
endif # CRYPTO_HW
@@ -41,3 +41,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
Four additional diffs in this commit (the remaining driver sources) are collapsed and not shown in this view.
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_ring *cdr,
				   struct safexcel_ring *rdr)
{
	cdr->offset = sizeof(u32) * priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
	cdr->read = cdr->base;

	rdr->offset = sizeof(u32) * priv->config.rd_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
	rdr->read = rdr->base;

	return 0;
}

/* Spread requests over the available rings in a round-robin fashion. */
inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}

/* Reserve the next write slot in the ring, or fail if the ring is full. */
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
				     struct safexcel_ring *ring)
{
	void *ptr = ring->write;

	if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
		return ERR_PTR(-ENOMEM);

	ring->write += ring->offset;
	/* Wrap around once the end of the ring is reached. */
	if (ring->write == ring->base_end)
		ring->write = ring->base;
	ring->nr++;

	return ptr;
}

/* Consume the next read slot in the ring, or fail if the ring is empty. */
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_ring *ring)
{
	void *ptr = ring->read;

	if (!ring->nr)
		return ERR_PTR(-ENOENT);

	ring->read += ring->offset;
	/* Wrap around once the end of the ring is reached. */
	if (ring->read == ring->base_end)
		ring->read = ring->base;
	ring->nr--;

	return ptr;
}

/* Undo the most recent safexcel_ring_next_wptr(), e.g. when a paired
 * descriptor could not be allocated.
 */
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_ring *ring)
{
	if (!ring->nr)
		return;

	if (ring->write == ring->base)
		ring->write += (EIP197_DEFAULT_RING_SIZE - 1) * ring->offset;
	else
		ring->write -= ring->offset;

	ring->nr--;
}

struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context)
{
	struct safexcel_command_desc *cdesc;
	int i;

	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
	if (IS_ERR(cdesc))
		return cdesc;

	memset(cdesc, 0, sizeof(struct safexcel_command_desc));

	cdesc->first_seg = first;
	cdesc->last_seg = last;
	cdesc->particle_size = data_len;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first && context) {
		struct safexcel_token *token =
			(struct safexcel_token *)cdesc->control_data.token;

		cdesc->control_data.packet_length = full_data_len;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD;
		cdesc->control_data.context_lo =
			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
		cdesc->control_data.context_hi = upper_32_bits(context);

		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
		cdesc->control_data.refresh = 2;

		for (i = 0; i < EIP197_MAX_TOKENS; i++)
			eip197_noop_token(&token[i]);
	}

	return cdesc;
}

struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;

	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
	if (IS_ERR(rdesc))
		return rdesc;

	memset(rdesc, 0, sizeof(struct safexcel_result_desc));

	rdesc->first_seg = first;
	rdesc->last_seg = last;
	rdesc->particle_size = len;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	return rdesc;
}
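
To show how the helpers in this file fit together, here is a minimal submission sketch, assuming a hypothetical caller that already holds a DMA-mapped data buffer and context record. The real callers live in safexcel_cipher.c and safexcel_hash.c (collapsed above), and ring locking is omitted here for brevity.

/* Illustrative only: pair one command descriptor with one result
 * descriptor, rolling the command ring back if the result ring is full.
 * The function name and parameters are hypothetical.
 */
static int example_submit_one_segment(struct safexcel_crypto_priv *priv,
				      dma_addr_t data, u32 len,
				      dma_addr_t context)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ring = safexcel_select_ring(priv);

	cdesc = safexcel_add_cdesc(priv, ring, true, true, data, len, len,
				   context);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	rdesc = safexcel_add_rdesc(priv, ring, true, true, data, len);
	if (IS_ERR(rdesc)) {
		/* Undo the command descriptor reserved above. */
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
		return PTR_ERR(rdesc);
	}

	return 0;
}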