Commit 324429d7 authored by Hariprasad Shenai, committed by David S. Miller

chcr: Support for Chelsio's Crypto Hardware

The Chelsio's Crypto Hardware can perform the following operations:
SHA1, SHA224, SHA256, SHA384 and SHA512, HMAC(SHA1), HMAC(SHA224),
HMAC(SHA256), HMAC(SHA384), HMAC(SHA512), AES-128-CBC, AES-192-CBC,
AES-256-CBC, AES-128-XTS, AES-256-XTS
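
For reference, these correspond to kernel crypto API algorithm names along the lines of the list below (an illustrative sketch only; the exact set the driver registers lives in its algorithm table in chcr_algo.c, which is collapsed here, and the array name is ours):

static const char * const chcr_alg_names[] = {
        "sha1", "sha224", "sha256", "sha384", "sha512",
        "hmac(sha1)", "hmac(sha224)", "hmac(sha256)",
        "hmac(sha384)", "hmac(sha512)",
        "cbc(aes)", "xts(aes)",
};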

This patch implements the driver for the above-mentioned features. The
driver is an Upper Layer Driver (ULD) that attaches to Chelsio's LLD
(cxgb4) and uses the queues allocated by the LLD to send crypto
requests to the hardware and to receive the responses from it.

The crypto operations can be performed by Chelsio's hardware from
userspace applications and/or from within the kernel using the
kernel's crypto API.
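
As an illustration, a kernel caller would reach these algorithms through the usual asynchronous hash interface. Below is a minimal sketch, assuming a kernel recent enough to provide crypto_wait_req() (newer than this patch); the names demo_sha1, data, and out are ours, not the driver's:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int demo_sha1(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        /* "sha1" resolves to the highest-priority registered provider. */
        tfm = crypto_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        /* data must be scatterlist-addressable (e.g. kmalloc'ed), not on the stack */
        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        /* submit and wait for the (possibly asynchronous) completion */
        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
out_tfm:
        crypto_free_ahash(tfm);
        return ret;
}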

The above-mentioned crypto features have been tested using the kernel's
self-tests in testmgr.h. They have also been tested from user space
using libkcapi and OpenSSL.
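
Both libkcapi and OpenSSL's AF_ALG engine sit on top of the kernel's AF_ALG socket interface, so a bare-bones userspace sketch (error handling elided; the kernel transparently picks the highest-priority provider, and chcr registers its algorithms at priority 300) looks like:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha1",
        };
        unsigned char digest[20];
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        opfd = accept(tfmfd, NULL, 0);

        write(opfd, "abc", 3);                  /* message to hash */
        read(opfd, digest, sizeof(digest));     /* SHA-1 digest back */

        for (i = 0; i < 20; i++)
                printf("%02x", digest[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}
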
Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d6657781
/**
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written and Maintained by:
* Manoj Malviya (manojmalviya@chelsio.com)
* Atul Gupta (atul.gupta@chelsio.com)
* Jitendra Lulla (jlulla@chelsio.com)
* Yeshaswi M R Gowda (yeshaswi@chelsio.com)
* Harsh Jain (harsh@chelsio.com)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_FW6_PLD] = cpl_fw6_pld_handler,
};
static struct cxgb4_pci_uld_info chcr_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = 4,
        .rxq_size = 1024,
        .nciq = 0,
        .ciq_size = 0,
        .add = chcr_uld_add,
        .state_change = chcr_uld_state_change,
        .rx_handler = chcr_uld_rx_handler,
};
int assign_chcr_device(struct chcr_dev **dev)
{
        struct uld_ctx *u_ctx;

        /*
         * TODO: decide which device to use when multiple devices are
         * available, perhaps selecting one round-robin. Requests from one
         * session must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
        /* _or_null(): plain list_first_entry() never yields NULL on an empty list */
        u_ctx = list_first_entry_or_null(&uld_ctx_list, struct uld_ctx, entry);
        if (!u_ctx) {
                mutex_unlock(&dev_mutex);
                return -ENXIO;
        }
        *dev = u_ctx->dev;
        mutex_unlock(&dev_mutex);
        return 0;
}
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
        struct chcr_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        spin_lock_init(&dev->lock_chcr_dev);
        u_ctx->dev = dev;
        dev->u_ctx = u_ctx;
        atomic_inc(&dev_count);
        return 0;
}
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
        kfree(u_ctx->dev);
        u_ctx->dev = NULL;
        atomic_dec(&dev_count);
        return 0;
}
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
                               unsigned char *input)
{
        struct crypto_async_request *req;
        struct cpl_fw6_pld *fw6_pld;
        u32 ack_err_status = 0;
        int error_status = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
                fw6_pld->data[1]);

        ack_err_status =
                ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
        if (ack_err_status) {
                if (CHK_MAC_ERR_BIT(ack_err_status) ||
                    CHK_PAD_ERR_BIT(ack_err_status))
                        error_status = -EINVAL;
        }
        /* call completion callback with failure status */
        if (req) {
                if (!chcr_handle_resp(req, input, error_status))
                        req->complete(req, error_status);
                else
                        return -EINVAL;
        } else {
                pr_err("Incorrect request address from the firmware\n");
                return -EFAULT;
        }
        return 0;
}
int chcr_send_wr(struct sk_buff *skb)
{
        return cxgb4_ofld_send(skb->dev, skb);
}
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
        struct uld_ctx *u_ctx;

        /* Create the device and add it in the device list */
        u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
        if (!u_ctx) {
                u_ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        u_ctx->lldi = *lld;
        mutex_lock(&dev_mutex);
        list_add_tail(&u_ctx->entry, &uld_ctx_list);
        mutex_unlock(&dev_mutex);
out:
        return u_ctx;
}
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *pgl)
{
        struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
        struct chcr_dev *dev = u_ctx->dev;
        const struct cpl_act_establish *rpl = (struct cpl_act_establish *)rsp;

        if (rpl->ot.opcode != CPL_FW6_PLD) {
                pr_err("Unsupported opcode\n");
                return 0;
        }

        if (!pgl)
                work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
        else
                work_handlers[rpl->ot.opcode](dev, pgl->va);
        return 0;
}
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct uld_ctx *u_ctx = handle;
        int ret = 0;

        switch (state) {
        case CXGB4_STATE_UP:
                if (!u_ctx->dev) {
                        ret = chcr_dev_add(u_ctx);
                        if (ret != 0)
                                return ret;
                }
                if (atomic_read(&dev_count) == 1)
                        ret = start_crypto();
                break;

        case CXGB4_STATE_DETACH:
                if (u_ctx->dev) {
                        mutex_lock(&dev_mutex);
                        chcr_dev_remove(u_ctx);
                        mutex_unlock(&dev_mutex);
                }
                if (!atomic_read(&dev_count))
                        stop_crypto();
                break;

        case CXGB4_STATE_START_RECOVERY:
        case CXGB4_STATE_DOWN:
        default:
                break;
        }
        return ret;
}
static int __init chcr_crypto_init(void)
{
        if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
                pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
                return -ENODEV;
        }
        return 0;
}
static void __exit chcr_crypto_exit(void)
{
        struct uld_ctx *u_ctx, *tmp;

        if (atomic_read(&dev_count))
                stop_crypto();

        /* Remove all devices from list */
        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
                if (u_ctx->dev)
                        chcr_dev_remove(u_ctx);
                kfree(u_ctx);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
}
module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);
MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __CHCR_CORE_H__
#define __CHCR_CORE_H__
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
#define DRV_VERSION "1.0.0.0"
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
#define PAD_ERROR_BIT 1
#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
struct uld_ctx;
struct chcr_dev {
        /* Requests submitted to h/w and waiting for responses. */
        spinlock_t lock_chcr_dev;
        struct crypto_queue pending_queue;
        struct uld_ctx *u_ctx;
        unsigned char tx_channel_id;
};

struct uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
        struct chcr_dev *dev;
};
int assign_chcr_device(struct chcr_dev **dev);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *pgl);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err);
#endif /* __CHCR_CORE_H__ */
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
/* Define the following if the h/w does not drop the AAD and IV data before
 * returning the processed data.
 */
#define CHCR_CRA_PRIORITY 300
#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
#define CHCR_GIVENCRYPT_OP 2
/* CPL/SCMD parameters */
#define CHCR_ENCRYPT_OP 0
#define CHCR_DECRYPT_OP 1
#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
#define CHCR_SCMD_CIPHER_MODE_NOP 0
#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
/* These are not really MAC key sizes. They are the sizes of the SHA
 * engine's intermediate (partial-hash) values.
 */
#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
#define CHCR_KEYCTX_NO_KEY 15
#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. flt #4 to #7 */
#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
#define flits_to_bytes(x) ((x) * 8)
#define IV_NOP 0
#define IV_IMMEDIATE 1
#define IV_DSGL 2
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
struct _key_ctx {
        __be32 ctx_hdr;
        u8 salt[MAX_SALT];
        __be64 reserved;
        unsigned char key[0];
};
struct ablk_ctx {
        u8 enc;
        unsigned int processed_len;
        __be32 key_ctx_hdr;
        unsigned int enckey_len;
        unsigned int dst_nents;
        struct scatterlist iv_sg;
        u8 key[CHCR_AES_MAX_KEY_LEN];
        u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
        unsigned char ciph_mode;
};

struct hmac_ctx {
        struct shash_desc *desc;
        u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
        u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct __crypto_ctx {
        struct hmac_ctx hmacctx[0];
        struct ablk_ctx ablkctx[0];
};

struct chcr_context {
        struct chcr_dev *dev;
        unsigned char tx_channel_id;
        struct __crypto_ctx crypto_ctx[0];
};
struct chcr_ahash_req_ctx {
        u32 result;
        char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
        u8 bfr_len;
        /* DMA the partial hash in it */
        u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
        u64 data_len;  /* data length processed so far */
        void *dummy_payload_ptr;
        /* SKB which is being sent to the hardware for processing */
        struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
        struct sk_buff *skb;
};

struct chcr_alg_template {
        u32 type;
        u32 is_registered;
        union {
                struct crypto_alg crypto;
                struct ahash_alg hash;
        } alg;
};

struct chcr_req_ctx {
        union {
                struct ahash_request *ahash_req;
                struct ablkcipher_request *ablk_req;
        } req;
        union {
                struct chcr_ahash_req_ctx *ahash_ctx;
                struct chcr_blkcipher_req_ctx *ablk_ctx;
        } ctx;
};

struct sge_opaque_hdr {
        void *dev;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
                                       struct chcr_context *ctx,
                                       unsigned short qid,
                                       unsigned short op_type);
#endif /* __CHCR_CRYPTO_H__ */