Commit 363dc396 authored by David S. Miller

Merge branch 'cxgb-crypto'

Hariprasad Shenai says:

====================
crypto/chcr: Add support for Chelsio Crypto Driver

This patch series adds support for Chelsio Crypto driver.

The patch series has been created against net-next tree and includes
patches for Chelsio Low Level Driver(cxgb4) and adds the new crypto
Upper Layer Driver(chcr) under a new directory drivers/crypto/chelsio.

Patch 1/4 ("cxgb4: Add support for dynamic allocation of resources for
ULD") adds support for dynamic allocation of resources for ULD. The
objective of this patch is to provide generic interface for upper layer
drivers to allocate and initialize hardware resources.
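
For illustration, a ULD built on this interface registers itself with
cxgb4 roughly as follows (schematic only; the field names follow the
cxgb4_uld_info structure introduced in patch 1/4, the values shown are
placeholders, and the chcr callbacks are defined in patch 2/4):

	static struct cxgb4_uld_info chcr_uld_info = {
		.name         = "chcr",
		.nrxq         = 4,       /* response queues requested */
		.rxq_size     = 1024,
		.add          = chcr_uld_add,          /* per-adapter init */
		.state_change = chcr_uld_state_change,
		.rx_handler   = chcr_uld_rx_handler,   /* CPL responses */
	};

	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

cxgb4 then allocates the requested queues when the ULD loads and frees
them again when it unregisters.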

The present cxgb4 network driver, apart from providing network functionality,
also initializes the hardware and thus acts as the lower layer driver for
other drivers to use hardware resources. It therefore serves both as a low
level driver for upper layer drivers like iw_cxgb4, cxgb4i and cxgbit, and as
a network driver.

Right now the allocation of resources for upper layer drivers is done
statically. Patch 1/4 adds a new infrastructure for dynamic allocation of
resources: cxgb4 reads the hardware capability through firmware, allocates
the queues for an upper layer driver when that driver is loaded, and frees
them again when it is unloaded.

Patches 2/4, 3/4 and 4/4 add support for the Chelsio Crypto driver. The
crypto driver acts as another ULD on top of cxgb4.

In this patch series the ULD API framework is used only by the crypto
driver; the other ULDs will be moved over to it in a follow-up series.

This patch series is posted for review only; if it looks OK we will test it
thoroughly and then request a merge.

We have included all the maintainers of the respective drivers. Kindly
review the changes and provide feedback.

V3: - Removed crypto queues from cxgb4 and added support for dynamic
      allocation of resources for Upper layer drivers
    - Dependency fix in Kconfig.

V2: - Some residual code cleanup
    - Add pr_fmt with chcr (KBUILD_MODNAME)
    - Rename variables in chcr_register_alg() to keep lines under 80 columns
    - Support for printing the crypto queue stats
    - Fix compile warnings reported by the kbuild bot for certain architectures
    - Dependency fix in Kconfig.
    - If the request has the MAY_BACKLOG bit set and the hardware queue is
      full, the request is queued up; otherwise -EBUSY is returned to
      throttle the user. An accepted request completes asynchronously: the
      submit call returns -EINPROGRESS and the result is delivered through
      the request's completion callback, as sketched below.
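
      For reference, a caller of the asynchronous crypto API handles these
      return codes roughly as in the fragment below (generic kernel crypto
      API usage; it assumes an ablkcipher_request req set up with
      CRYPTO_TFM_REQ_MAY_BACKLOG and a callback that fills a
      struct crypto_result named result, as declared in chcr_algo.h):

	ret = crypto_ablkcipher_encrypt(req);
	switch (ret) {
	case 0:			/* completed synchronously */
		break;
	case -EINPROGRESS:	/* accepted, callback will fire */
	case -EBUSY:		/* backlogged (MAY_BACKLOG set) */
		wait_for_completion(&result.completion);
		ret = result.err;
		break;
	default:		/* hard error, e.g. -ENOMEM */
		break;
	}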
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b65b24d4 02038fd6
...@@ -550,4 +550,6 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
source "drivers/crypto/chelsio/Kconfig"
endif # CRYPTO_HW
...@@ -31,3 +31,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
config CRYPTO_DEV_CHELSIO
tristate "Chelsio Crypto Co-processor Driver"
depends on CHELSIO_T4
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called chcr.
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Written and Maintained by:
* Manoj Malviya (manojmalviya@chelsio.com)
* Atul Gupta (atul.gupta@chelsio.com)
* Jitendra Lulla (jlulla@chelsio.com)
* Yeshaswi M R Gowda (yeshaswi@chelsio.com)
* Harsh Jain (harsh@chelsio.com)
*/
#define pr_fmt(fmt) "chcr:" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
}
static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->hmacctx;
}
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
}
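/*
* Work requests whose payload fits within CRYPTO_MAX_IMM_TX_PKT_LEN
* (256 bytes, see chcr_algo.h) are sent as immediate data inside the
* WR instead of being fetched through a DMA gather list.
*/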
static inline int is_ofld_imm(const struct sk_buff *skb)
{
return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}
/*
* sgl_len - calculates the size of an SGL of the given capacity
* @n: the number of SGL entries
* Calculates the number of flits needed for a scatter/gather list that
* can hold the given number of entries.
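* For reference: sgl_len(1) = 2, sgl_len(2) = 4, sgl_len(3) = 5 and
* sgl_len(8) = 13 flits.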
*/
static inline unsigned int sgl_len(unsigned int n)
{
n--;
return (3 * n) / 2 + (n & 1) + 2;
}
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int error_status)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_req_ctx ctx_req;
struct cpl_fw6_pld *fw6_pld;
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
if (!error_status) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
}
break;
case CRYPTO_ALG_TYPE_AHASH:
ctx_req.req.ahash_req = (struct ahash_request *)req;
ctx_req.ctx.ahash_ctx =
ahash_request_ctx(ctx_req.req.ahash_req);
digestsize =
crypto_ahash_digestsize(crypto_ahash_reqtfm(
ctx_req.req.ahash_req));
updated_digestsize = digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
if (ctx_req.ctx.ahash_ctx->skb)
ctx_req.ctx.ahash_ctx->skb = NULL;
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
sizeof(struct cpl_fw6_pld),
digestsize);
} else {
memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
return 0;
}
/*
* calc_tx_flits_ofld - calculate # of flits for an offload packet
* @skb: the packet
* Returns the number of flits needed for the given offload packet.
* These packets are already fully constructed and no additional headers
* will be added.
*/
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
unsigned int flits, cnt;
if (is_ofld_imm(skb))
return DIV_ROUND_UP(skb->len, 8);
flits = skb_transport_offset(skb) / 8; /* headers */
cnt = skb_shinfo(skb)->nr_frags;
if (skb_tail_pointer(skb) != skb_transport_header(skb))
cnt++;
return flits + sgl_len(cnt);
}
static struct shash_desc *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
break;
case SHA224_DIGEST_SIZE:
base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
break;
case SHA256_DIGEST_SIZE:
base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
break;
case SHA384_DIGEST_SIZE:
base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
break;
case SHA512_DIGEST_SIZE:
base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
break;
}
if (IS_ERR(base_hash)) {
pr_err("Can not allocate sha-generic algo.\n");
return (void *)base_hash;
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
GFP_KERNEL);
if (!desc)
return ERR_PTR(-ENOMEM);
desc->tfm = base_hash;
desc->flags = crypto_shash_get_flags(base_hash);
return desc;
}
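/*
* chcr_compute_partial_hash - absorb one block of key XOR ipad/opad and
* export the resulting compression-function state. This is the
* precomputed inner/outer HMAC state that is later loaded into the
* hardware key context instead of the raw key.
*/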
static int chcr_compute_partial_hash(struct shash_desc *desc,
char *iopad, char *result_hash,
int digest_size)
{
struct sha1_state sha1_st;
struct sha256_state sha256_st;
struct sha512_state sha512_st;
int error;
if (digest_size == SHA1_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
crypto_shash_export(desc, (void *)&sha1_st);
memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
} else if (digest_size == SHA224_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
crypto_shash_export(desc, (void *)&sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA256_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
crypto_shash_export(desc, (void *)&sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA384_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
crypto_shash_export(desc, (void *)&sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else if (digest_size == SHA512_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
crypto_shash_export(desc, (void *)&sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else {
error = -EINVAL;
pr_err("Unknown digest size %d\n", digest_size);
}
return error;
}
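/*
* chcr_change_order - the software shash export keeps the digest state
* words in host byte order, while the hardware key context expects them
* big-endian; swap each 32- or 64-bit word accordingly.
*/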
static void chcr_change_order(char *buf, int ds)
{
int i;
if (ds == SHA512_DIGEST_SIZE) {
for (i = 0; i < (ds / sizeof(u64)); i++)
*((__be64 *)buf + i) =
cpu_to_be64(*((u64 *)buf + i));
} else {
for (i = 0; i < (ds / sizeof(u32)); i++)
*((__be32 *)buf + i) =
cpu_to_be32(*((u32 *)buf + i));
}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
return 1;
return 0;
}
static inline unsigned int ch_nents(struct scatterlist *sg,
unsigned int *total_size)
{
unsigned int nents;
for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
nents++;
*total_size += sg->length;
}
return nents;
}
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
unsigned int out_buf_size = sg_param->obsize;
unsigned int nents = sg_param->nents, i, j, tot_len = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
phys_cpl->pcirlxorder_to_noofsgentr =
htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
phys_cpl->rss_hdr_int.hash_val = 0;
to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
for (j = i; (nents && (j < (8 + i))); j++, nents--) {
to->len[j] = htons(sg->length);
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
if (out_buf_size) {
if (tot_len + sg_dma_len(sg) >= out_buf_size) {
to->len[j] = htons(out_buf_size -
tot_len);
return;
}
tot_len += sg_dma_len(sg);
}
sg = sg_next(sg);
}
}
}
static inline unsigned
int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg, struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
if (sg_param->nents == 0) {
pr_err("CHCR : DMA mapping failed\n");
return -EINVAL;
}
write_phys_cpl(phys_cpl, sg, sg_param);
return 0;
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct chcr_alg_template *chcr_crypto_alg =
container_of(alg, struct chcr_alg_template, alg.crypto);
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static inline void
write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
unsigned int page_len;
skb->len += count;
skb->data_len += count;
skb->truesize += count;
while (count > 0) {
if (sg && (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
page_len = min(sg->length, count);
skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
(*frags)++;
count -= page_len;
sg = sg_next(sg);
}
}
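/*
* generate_copy_rrkey - build the key material for a decrypt key
* context. For AES-CBC the hardware needs the reverse (final) round
* key, generated in software by get_aes_decrypt_key() (the key length
* is passed in bits, hence enckey_len << 3). For AES-XTS the stored key
* is the concatenation of two halves, so the second half is copied
* as-is and the reverse round key is generated for the first half only
* (enckey_len is the combined length, hence the >> 1 and << 2 shifts).
*/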
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
get_aes_decrypt_key(key_ctx->key, ablkctx->key,
ablkctx->enckey_len << 3);
memset(key_ctx->key + ablkctx->enckey_len, 0,
CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
ablkctx->key, ablkctx->enckey_len << 2);
}
return 0;
}
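/*
* create_wreq - fill the common work request header. The WR built here
* and in create_cipher_wr()/create_final_hash_wr() is laid out as:
*
*   fw_crypto_lookaside_wr | ulp_txpkt | ulptx_idata | cpl_tx_sec_pdu |
*   key context | cpl_rx_phys_dsgl (cipher) or DUMMY_BYTES (hash)
*
* followed by the IV and payload, either as immediate data or as skb
* page fragments gathered by the hardware.
*/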
static inline void create_wreq(struct chcr_context *ctx,
struct fw_crypto_lookaside_wr *wreq,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
unsigned int phys_dsgl)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
if (is_ofld_imm(skb)) {
immdatalen = skb->data_len;
iv_loc = IV_IMMEDIATE;
} else {
nr_frags = skb_shinfo(skb)->nr_frags;
}
wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
(kctx_len >> 4));
wreq->pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
wreq->cookie = cpu_to_be64((uintptr_t)req);
wreq->rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
(hash_sz) ? IV_NOP : iv_loc);
ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
16) - ((sizeof(*wreq)) >> 4)));
sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
((hash_sz) ? DUMMY_BYTES :
(sizeof(struct cpl_rx_phys_dsgl) +
phys_dsgl)) + immdatalen);
}
/**
* create_cipher_wr - form the WR for cipher operations
* @req_base: crypto async request (wrapping the ablkcipher request)
* @ctx: crypto driver context of the request.
* @qid: ingress qid where response of this WR should be received.
* @op_type: encryption or decryption
*/
static struct sk_buff
*create_cipher_wr(struct crypto_async_request *req_base,
struct chcr_context *ctx, unsigned short qid,
unsigned short op_type)
{
struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct _key_ctx *key_ctx;
struct fw_crypto_lookaside_wr *wreq;
struct cpl_tx_sec_pdu *sec_cpl;
struct cpl_rx_phys_dsgl *phys_cpl;
struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
if (!req->info)
return ERR_PTR(-EINVAL);
ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
ablkctx->enc = op_type;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
(req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
return ERR_PTR(-EINVAL);
phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
kctx_len = sizeof(*key_ctx) +
(DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
GFP_ATOMIC);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
sec_cpl->op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
sec_cpl->pldlen = htonl(ivsize + req->nbytes);
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
ivsize + 1, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
0, 0);
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
0, 0, ivsize >> 1, 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
if (generate_copy_rrkey(ablkctx, key_ctx))
goto map_fail1;
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
memcpy(key_ctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
memcpy(ablkctx->iv, req->info, ivsize);
sg_init_table(&ablkctx->iv_sg, 1);
sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
sg_param.nents = ablkctx->dst_nents;
sg_param.obsize = dst_bufsize;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
&sg_param))
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
req_ctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
goto badkey_err;
memcpy(ablkctx->key, key, keylen);
ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
alignment = 8;
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
goto badkey_err;
}
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
0, 0, context_size);
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return -EINVAL;
}
int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
int ret = 0;
struct sge_ofld_txq *q;
struct adapter *adap = netdev2adap(dev);
local_bh_disable();
q = &adap->sge.ofldtxq[idx];
spin_lock(&q->sendq.lock);
if (q->full)
ret = -1;
spin_unlock(&q->sendq.lock);
local_bh_enable();
return ret;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
skb = create_cipher_wr(req_base, ctx,
u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
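/*
* chcr_device_init - bind the tfm context to a device and pick a
* response queue. The queues are split evenly across channels; the CPU
* on which the tfm is initialised is used to spread contexts over the
* queues of the (currently fixed) channel 0.
*/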
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx;
unsigned int id;
int err = 0, rxq_perchan, rxq_idx;
id = smp_processor_id();
if (!ctx->dev) {
err = assign_chcr_device(&ctx->dev);
if (err) {
pr_err("chcr device assignment fails\n");
goto out;
}
u_ctx = ULD_CTX(ctx);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
ctx->dev->tx_channel_id = 0;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
spin_unlock(&ctx->dev->lock_chcr_dev);
}
out:
return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int get_alg_config(struct algo_param *params,
unsigned int auth_size)
{
switch (auth_size) {
case SHA1_DIGEST_SIZE:
params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
params->result_size = SHA1_DIGEST_SIZE;
break;
case SHA224_DIGEST_SIZE:
params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
params->result_size = SHA256_DIGEST_SIZE;
break;
case SHA256_DIGEST_SIZE:
params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
params->result_size = SHA256_DIGEST_SIZE;
break;
case SHA384_DIGEST_SIZE:
params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
params->result_size = SHA512_DIGEST_SIZE;
break;
case SHA512_DIGEST_SIZE:
params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
params->result_size = SHA512_DIGEST_SIZE;
break;
default:
pr_err("chcr : ERROR, unsupported digest size\n");
return -EINVAL;
}
return 0;
}
static inline int
write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
struct sk_buff *skb, unsigned int *frags, char *bfr,
u8 bfr_len)
{
void *page_ptr = NULL;
skb->len += bfr_len;
skb->data_len += bfr_len;
skb->truesize += bfr_len;
page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
if (!page_ptr)
return -ENOMEM;
get_page(virt_to_page(page_ptr));
req_ctx->dummy_payload_ptr = page_ptr;
memcpy(page_ptr, bfr, bfr_len);
skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
offset_in_page(page_ptr), bfr_len);
(*frags)++;
return 0;
}
/**
* create_final_hash_wr - Create hash work request
* @req: ahash request
* @param: parameters describing this hash work request
*/
static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
struct _key_ctx *key_ctx;
struct fw_crypto_lookaside_wr *wreq;
struct cpl_tx_sec_pdu *sec_cpl;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int kctx_len = sizeof(*key_ctx);
u8 hash_size_in_response = 0;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len += param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
if (req_ctx->result)
hash_size_in_response = digestsize;
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
GFP_ATOMIC);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
memset(wreq, 0, transhdr_len);
sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
sec_cpl->op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
sec_cpl->aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
sec_cpl->cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
sec_cpl->seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
param->opad_needed, 0, 0);
sec_cpl->ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
if (param->opad_needed)
memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
(kctx_len >> 4));
sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
param->bfr_len);
if (param->sg_len != 0)
write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
0);
req_ctx->skb = skb;
skb_get(skb);
return skb;
}
static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
if (nbytes + req_ctx->bfr_len >= bs) {
remainder = (nbytes + req_ctx->bfr_len) % bs;
nbytes = nbytes + req_ctx->bfr_len - remainder;
} else {
sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
req_ctx->bfr_len, nbytes, 0);
req_ctx->bfr_len += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
params.sg_len = nbytes - req_ctx->bfr_len;
params.bfr_len = req_ctx->bfr_len;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_final_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
req_ctx->bfr_len = remainder;
if (remainder)
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req_ctx->bfr, remainder, req->nbytes -
remainder);
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
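/*
* create_last_hash_block - build the final MD-style padding block by
* hand: a 0x80 byte followed by zeroes, with the total message length
* in bits (scmd1 << 3) written into the last eight bytes of the 64- or
* 128-byte block.
*/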
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
memset(bfr_ptr, 0, bs);
*bfr_ptr = 0x80;
if (bs == 64)
*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
else
*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(ctx);
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
params.opad_needed = 0;
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
params.bfr_len = req_ctx->bfr_len;
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
params.bfr_len = bs;
} else {
params.scmd1 = req_ctx->data_len;
params.last = 1;
params.more = 0;
}
skb = create_final_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
params.opad_needed = 0;
params.sg_len = req->nbytes;
params.bfr_len = req_ctx->bfr_len;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
params.bfr_len = bs;
} else {
params.scmd1 = req_ctx->data_len;
params.last = 1;
params.more = 0;
}
skb = create_final_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
params.opad_needed = 0;
params.last = 0;
params.more = 0;
params.sg_len = req->nbytes;
params.bfr_len = 0;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req_ctx->bfr && req->nbytes == 0) {
create_last_hash_block(req_ctx->bfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
skb = create_final_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
return -EINPROGRESS;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
state->bfr_len = req_ctx->bfr_len;
state->data_len = req_ctx->data_len;
memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
req_ctx->bfr_len = state->bfr_len;
req_ctx->data_len = state->data_len;
req_ctx->dummy_payload_ptr = NULL;
memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
/*
* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result.
* ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
*/
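/*
* Recall HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is
* the key zero-padded (or first hashed) to the block size; the partial
* hashes computed below are the digest states after the first block of
* the outer and inner computations respectively.
*/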
if (!hmacctx->desc)
return -EINVAL;
if (keylen > bs) {
err = crypto_shash_digest(hmacctx->desc, key, keylen,
hmacctx->ipad);
if (err)
goto out;
keylen = digestsize;
} else {
memcpy(hmacctx->ipad, key, keylen);
}
memset(hmacctx->ipad + keylen, 0, bs - keylen);
memcpy(hmacctx->opad, hmacctx->ipad, bs);
for (i = 0; i < bs / sizeof(int); i++) {
*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
}
updated_digestsize = digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->opad, updated_digestsize);
out:
return err;
}
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
int status = 0;
unsigned short context_size = 0;
if ((key_len == (AES_KEYSIZE_128 << 1)) ||
(key_len == (AES_KEYSIZE_256 << 1))) {
memcpy(ablkctx->key, key, key_len);
ablkctx->enckey_len = key_len;
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
ablkctx->key_ctx_hdr =
FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
CHCR_KEYCTX_NO_KEY, 1,
0, context_size);
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
} else {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
status = -EINVAL;
}
return status;
}
static int chcr_sha_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
req_ctx->dummy_payload_ptr = NULL;
req_ctx->bfr_len = 0;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
return 0;
}
static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
unsigned int digestsize = crypto_ahash_digestsize(rtfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
chcr_sha_init(areq);
req_ctx->data_len = bs;
if (is_hmac(crypto_ahash_tfm(rtfm))) {
if (digestsize == SHA224_DIGEST_SIZE)
memcpy(req_ctx->partial_hash, hmacctx->ipad,
SHA256_DIGEST_SIZE);
else if (digestsize == SHA384_DIGEST_SIZE)
memcpy(req_ctx->partial_hash, hmacctx->ipad,
SHA512_DIGEST_SIZE);
else
memcpy(req_ctx->partial_hash, hmacctx->ipad,
digestsize);
}
return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
unsigned int digestsize =
crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
hmacctx->desc = chcr_alloc_shash(digestsize);
if (IS_ERR(hmacctx->desc))
return PTR_ERR(hmacctx->desc);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_free_shash(struct shash_desc *desc)
{
crypto_free_shash(desc->tfm);
kfree(desc);
}
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
if (hmacctx->desc) {
chcr_free_shash(hmacctx->desc);
hmacctx->desc = NULL;
}
}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc(aes-chcr)",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chcr_context)
+ sizeof(struct ablk_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = chcr_cra_init,
.cra_exit = NULL,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = chcr_aes_cbc_setkey,
.encrypt = chcr_aes_encrypt,
.decrypt = chcr_aes_decrypt,
}
}
},
{
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts(aes-chcr)",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct ablk_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = chcr_cra_init,
.cra_exit = NULL,
.cra_u = {
.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = chcr_aes_xts_setkey,
.encrypt = chcr_aes_encrypt,
.decrypt = chcr_aes_decrypt,
}
}
}
},
/* SHA */
{
.type = CRYPTO_ALG_TYPE_AHASH,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_AHASH,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_AHASH,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_AHASH,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_AHASH,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
/* HMAC */
{
.type = CRYPTO_ALG_TYPE_HMAC,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac(sha1-chcr)",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_HMAC,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac(sha224-chcr)",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_HMAC,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac(sha256-chcr)",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_HMAC,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac(sha384-chcr)",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
},
{
.type = CRYPTO_ALG_TYPE_HMAC,
.is_registered = 0,
.alg.hash = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac(sha512-chcr)",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
};
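/*
* The ahash entries above are intentionally sparse: the common
* callbacks, cra_ctxsize, flags and priority are filled in at
* registration time by chcr_register_alg().
*/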
/*
* chcr_unregister_alg - Deregister crypto algorithms with
* kernel framework.
*/
static int chcr_unregister_alg(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
if (driver_algs[i].is_registered)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
&driver_algs[i].alg.hash);
break;
}
driver_algs[i].is_registered = 0;
}
return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
* chcr_register_alg - Register crypto algorithms with kernel framework.
*/
static int chcr_register_alg(void)
{
struct crypto_alg ai;
struct ahash_alg *a_hash;
int err = 0, i;
char *name = NULL;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (driver_algs[i].is_registered)
continue;
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
a_hash->final = chcr_ahash_final;
a_hash->finup = chcr_ahash_finup;
a_hash->digest = chcr_ahash_digest;
a_hash->export = chcr_ahash_export;
a_hash->import = chcr_ahash_import;
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
a_hash->halg.base.cra_type = &crypto_ahash_type;
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
a_hash->init = chcr_hmac_init;
a_hash->setkey = chcr_ahash_setkey;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
} else {
a_hash->init = chcr_sha_init;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
a_hash->halg.base.cra_init = chcr_sha_cra_init;
}
err = crypto_register_ahash(&driver_algs[i].alg.hash);
ai = driver_algs[i].alg.hash.halg.base;
name = ai.cra_driver_name;
break;
}
if (err) {
pr_err("chcr : %s : Algorithm registration failed\n",
name);
goto register_err;
} else {
driver_algs[i].is_registered = 1;
}
}
return 0;
register_err:
chcr_unregister_alg();
return err;
}
/*
* start_crypto - Register the crypto algorithms.
* This should be called once, when the first device comes up. After this
* the kernel will start calling driver APIs for crypto operations.
*/
int start_crypto(void)
{
return chcr_register_alg();
}
/*
* stop_crypto - Deregister all the crypto algorithms with kernel.
* This should be called once when the last device goes down. After this
* the kernel will not call the driver API for crypto operations.
*/
int stop_crypto(void)
{
chcr_unregister_alg();
return 0;
}
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __CHCR_ALGO_H__
#define __CHCR_ALGO_H__
/* Crypto key context */
#define KEY_CONTEXT_CTX_LEN_S 24
#define KEY_CONTEXT_CTX_LEN_M 0xff
#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
#define KEY_CONTEXT_CTX_LEN_G(x) \
(((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)
#define KEY_CONTEXT_DUAL_CK_S 12
#define KEY_CONTEXT_DUAL_CK_M 0x1
#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S)
#define KEY_CONTEXT_DUAL_CK_G(x) \
(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M)
#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U)
#define KEY_CONTEXT_SALT_PRESENT_S 10
#define KEY_CONTEXT_SALT_PRESENT_M 0x1
#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
#define KEY_CONTEXT_SALT_PRESENT_G(x) \
(((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \
KEY_CONTEXT_SALT_PRESENT_M)
#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
#define KEY_CONTEXT_VALID_S 0
#define KEY_CONTEXT_VALID_M 0x1
#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
#define KEY_CONTEXT_VALID_G(x) \
(((x) >> KEY_CONTEXT_VALID_S) & \
KEY_CONTEXT_VALID_M)
#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
#define KEY_CONTEXT_CK_SIZE_S 6
#define KEY_CONTEXT_CK_SIZE_M 0xf
#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
#define KEY_CONTEXT_CK_SIZE_G(x) \
(((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)
#define KEY_CONTEXT_MK_SIZE_S 2
#define KEY_CONTEXT_MK_SIZE_M 0xf
#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
#define KEY_CONTEXT_MK_SIZE_G(x) \
(((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M)
#define KEY_CONTEXT_OPAD_PRESENT_S 11
#define KEY_CONTEXT_OPAD_PRESENT_M 0x1
#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
#define KEY_CONTEXT_OPAD_PRESENT_G(x) \
(((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \
KEY_CONTEXT_OPAD_PRESENT_M)
#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
#define CHCR_HASH_MAX_DIGEST_SIZE 64
#define CHCR_MAX_SHA_DIGEST_SIZE 64
#define IPSEC_TRUNCATED_ICV_SIZE 12
#define TLS_TRUNCATED_HMAC_SIZE 10
#define CBCMAC_DIGEST_SIZE 16
#define MAX_HASH_NAME 20
#define SHA1_INIT_STATE_5X4B 5
#define SHA256_INIT_STATE_8X4B 8
#define SHA512_INIT_STATE_8X8B 8
#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B
#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B
#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B
#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B
#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B
#define DUMMY_BYTES 16
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
#define TRANSHDR_SIZE(alignedkctx_len)\
(sizeof(struct ulptx_idata) +\
sizeof(struct ulp_txpkt) +\
sizeof(struct fw_crypto_lookaside_wr) +\
sizeof(struct cpl_tx_sec_pdu) +\
(alignedkctx_len))
#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
(TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
sizeof(struct cpl_rx_phys_dsgl))
#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
(TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
sizeof(struct ulp_txpkt) + \
sizeof(struct ulptx_idata))
#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
htonl( \
CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \
CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \
CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi)))
#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \
htonl( \
CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \
CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \
SCMD_ENC_DEC_CTRL_V((ctrl)) | \
SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \
SCMD_CIPH_MODE_V((cmode)) | \
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
SCMD_NUM_IVS_V((nivs)))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
SCMD_IV_GEN_CTRL_V(0) | \
SCMD_LAST_FRAG_V((last)) | \
SCMD_MORE_FRAGS_V((more)) | \
SCMD_TLS_COMPPDU_V(0) | \
SCMD_KEY_CTX_INLINE_V((ctx_in)) | \
SCMD_TLS_FRAG_ENABLE_V(0) | \
SCMD_MAC_ONLY_V((mac)) | \
SCMD_AADIVDROP_V((ivdrop)) | \
SCMD_HDR_LEN_V((len)))
#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
htonl(KEY_CONTEXT_VALID_V(1) | \
KEY_CONTEXT_CK_SIZE_V((ck_size)) | \
KEY_CONTEXT_MK_SIZE_V(mk_size) | \
KEY_CONTEXT_DUAL_CK_V((d_ck)) | \
KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \
KEY_CONTEXT_SALT_PRESENT_V(1) | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
FW_CRYPTO_LOOKASIDE_WR) | \
FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
#define FILL_WR_RX_Q_ID(cid, qid, wr_iv) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \
FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)))
#define FILL_ULPTX_CMD_DEST(cid) \
htonl(ULPTX_CMD_V(ULP_TX_PKT) | \
ULP_TXPKT_DEST_V(0) | \
ULP_TXPKT_DATAMODIFY_V(0) | \
ULP_TXPKT_CHANNELID_V((cid)) | \
ULP_TXPKT_RO_V(1) | \
ULP_TXPKT_FID_V(0))
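/*
* Pad the partial-hash state stored in the key context up to a 16-byte
* boundary; SHA-1 (20-byte state) is the only digest that needs it
* (20 + 12 = 32).
*/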
#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\
_bs == SHA1_DIGEST_SIZE ? 12 : 0; })
#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \
sgl_lengths[total_frags] : 0) |\
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0))
#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\
calc_tx_flits_ofld(skb) * 8), 16)))
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
#define MAX_NK 8
#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
unsigned int result_size;
};
struct hash_wr_param {
unsigned int opad_needed;
unsigned int more;
unsigned int last;
struct algo_param alg_prm;
unsigned int sg_len;
unsigned int bfr_len;
u64 scmd1;
};
enum {
AES_KEYLENGTH_128BIT = 128,
AES_KEYLENGTH_192BIT = 192,
AES_KEYLENGTH_256BIT = 256
};
enum {
KEYLENGTH_3BYTES = 3,
KEYLENGTH_4BYTES = 4,
KEYLENGTH_6BYTES = 6,
KEYLENGTH_8BYTES = 8
};
enum {
NUMBER_OF_ROUNDS_10 = 10,
NUMBER_OF_ROUNDS_12 = 12,
NUMBER_OF_ROUNDS_14 = 14,
};
/*
* CCM defines values of 4, 6, 8, 10, 12, 14, and 16 octets,
* where they indicate the size of the integrity check value (ICV)
*/
enum {
AES_CCM_ICV_4 = 4,
AES_CCM_ICV_6 = 6,
AES_CCM_ICV_8 = 8,
AES_CCM_ICV_10 = 10,
AES_CCM_ICV_12 = 12,
AES_CCM_ICV_14 = 14,
AES_CCM_ICV_16 = 16
};
struct hash_op_params {
unsigned char mk_size;
unsigned char pad_align;
unsigned char auth_mode;
char hash_name[MAX_HASH_NAME];
unsigned short block_size;
unsigned short word_size;
unsigned short ipad_size;
};
struct phys_sge_pairs {
__be16 len[8];
__be64 addr[8];
};
struct phys_sge_parm {
unsigned int nents;
unsigned int obsize;
unsigned short qid;
unsigned char align;
};
struct crypto_result {
struct completion completion;
int err;
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = {
SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
};
static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = {
SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
};
static inline void copy_hash_init_values(char *key, int digestsize)
{
u8 i;
__be32 *dkey = (__be32 *)key;
u64 *ldkey = (u64 *)key;
__be64 *sha384 = (__be64 *)sha384_init;
__be64 *sha512 = (__be64 *)sha512_init;
switch (digestsize) {
case SHA1_DIGEST_SIZE:
for (i = 0; i < SHA1_INIT_STATE; i++)
dkey[i] = cpu_to_be32(sha1_init[i]);
break;
case SHA224_DIGEST_SIZE:
for (i = 0; i < SHA224_INIT_STATE; i++)
dkey[i] = cpu_to_be32(sha224_init[i]);
break;
case SHA256_DIGEST_SIZE:
for (i = 0; i < SHA256_INIT_STATE; i++)
dkey[i] = cpu_to_be32(sha256_init[i]);
break;
case SHA384_DIGEST_SIZE:
for (i = 0; i < SHA384_INIT_STATE; i++)
ldkey[i] = be64_to_cpu(sha384[i]);
break;
case SHA512_DIGEST_SIZE:
for (i = 0; i < SHA512_INIT_STATE; i++)
ldkey[i] = be64_to_cpu(sha512[i]);
break;
}
}
static const u8 sgl_lengths[20] = {
0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
};
/* Number of len fields (8) * size of one len field (2 bytes) */
#define PHYSDSGL_MAX_LEN_SIZE 16
static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr)
{
/* len field size + addr field size */
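/* e.g. 3 entries -> 48 bytes, 8 -> 80 bytes, 9 -> 112 bytes */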
return ((sgl_entr >> 3) + ((sgl_entr % 8) ?
1 : 0)) * PHYSDSGL_MAX_LEN_SIZE +
(sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3);
}
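/* The following is an illustrative sketch only (the example_* helper is not
 * part of the submitted patches): it shows what the formula above evaluates
 * to for three SG entries - one 16-byte len[] block, 3 * 8 bytes of
 * addresses and an 8-byte pad that keeps the address list 16-byte aligned.
 */
static inline u16 example_space_for_three_sg_entries(void)
{
	return get_space_for_phys_dsgl(3);	/* 16 + 24 + 8 = 48 bytes */
}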
/* The AES s-transform matrix (s-box). */
static const u8 aes_sbox[256] = {
99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7,
18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90,
160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32,
252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170,
251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81,
163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243,
210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100,
93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184,
20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194,
211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78,
169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166,
180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102,
72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248,
152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84,
187, 22
};
static u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
*(u32 *)(&bytes[0]) = w;
bytes[0] = aes_sbox[bytes[0]];
bytes[1] = aes_sbox[bytes[1]];
bytes[2] = aes_sbox[bytes[2]];
bytes[3] = aes_sbox[bytes[3]];
return *(u32 *)(&bytes[0]);
}
static const u32 round_constant[11] = {
0x01000000, 0x02000000, 0x04000000, 0x08000000,
0x10000000, 0x20000000, 0x40000000, 0x80000000,
0x1B000000, 0x36000000, 0x6C000000
};
/* dec_key   - OUTPUT - reverse round key
 * key       - INPUT  - cipher key
 * keylength - INPUT  - key length in bits
 */
static inline void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
{
u32 temp;
u32 w_ring[MAX_NK];
int i, j, k = 0;
u8 nr, nk;
switch (keylength) {
case AES_KEYLENGTH_128BIT:
nk = KEYLENGTH_4BYTES;
nr = NUMBER_OF_ROUNDS_10;
break;
case AES_KEYLENGTH_192BIT:
nk = KEYLENGTH_6BYTES;
nr = NUMBER_OF_ROUNDS_12;
break;
case AES_KEYLENGTH_256BIT:
nk = KEYLENGTH_8BYTES;
nr = NUMBER_OF_ROUNDS_14;
break;
default:
return;
}
	for (i = 0; i < nk; i++)
w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
i = 0;
temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
/* RotWord(temp) */
temp = (temp << 8) | (temp >> 24);
temp = aes_ks_subword(temp);
temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0))
temp = aes_ks_subword(temp);
w_ring[i % nk] ^= temp;
temp = w_ring[i % nk];
i++;
}
for (k = 0, j = i % nk; k < nk; k++) {
*((u32 *)dec_key + k) = htonl(w_ring[j]);
j--;
		if (j < 0)
j += nk;
}
}
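/* Usage sketch (illustrative only; the example_* helper below is not part of
 * the submitted patches): derive the reverse round key that the hardware
 * needs in order to decrypt with an AES-128 cipher key.
 */
static inline void example_aes128_reverse_key(u8 *dec_key, const u8 *key)
{
	/* dec_key must provide room for MAX_NK * 4 = 32 bytes */
	get_aes_decrypt_key(dec_key, key, AES_KEYLENGTH_128BIT);
}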
#endif /* __CHCR_ALGO_H__ */
/**
 * This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written and Maintained by:
* Manoj Malviya (manojmalviya@chelsio.com)
* Atul Gupta (atul.gupta@chelsio.com)
* Jitendra Lulla (jlulla@chelsio.com)
* Yeshaswi M R Gowda (yeshaswi@chelsio.com)
* Harsh Jain (harsh@chelsio.com)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};
static struct cxgb4_pci_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = 4,
.rxq_size = 1024,
.nciq = 0,
.ciq_size = 0,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
};
int assign_chcr_device(struct chcr_dev **dev)
{
struct uld_ctx *u_ctx;
	/*
	 * TODO: decide which device to use when multiple devices are
	 * available, perhaps selecting them round-robin. All requests of
	 * one session must go to the same device to preserve ordering.
	 */
	mutex_lock(&dev_mutex); /* TODO ? */
	u_ctx = list_first_entry_or_null(&uld_ctx_list, struct uld_ctx, entry);
if (!u_ctx) {
mutex_unlock(&dev_mutex);
return -ENXIO;
}
*dev = u_ctx->dev;
mutex_unlock(&dev_mutex);
return 0;
}
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENXIO;
spin_lock_init(&dev->lock_chcr_dev);
u_ctx->dev = dev;
dev->u_ctx = u_ctx;
atomic_inc(&dev_count);
return 0;
}
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
kfree(u_ctx->dev);
u_ctx->dev = NULL;
atomic_dec(&dev_count);
return 0;
}
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
unsigned char *input)
{
struct crypto_async_request *req;
struct cpl_fw6_pld *fw6_pld;
u32 ack_err_status = 0;
int error_status = 0;
fw6_pld = (struct cpl_fw6_pld *)input;
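	/* data[1] of the CPL echoes back the cookie (the original crypto
	 * request pointer) that was placed in the work request; the second
	 * 32-bit word of data[0] carries the status bits checked below.
	 */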
req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
fw6_pld->data[1]);
ack_err_status =
ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
error_status = -EINVAL;
}
	/* call the completion callback with the hardware status */
if (req) {
if (!chcr_handle_resp(req, input, error_status))
req->complete(req, error_status);
else
return -EINVAL;
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
}
return 0;
}
int chcr_send_wr(struct sk_buff *skb)
{
return cxgb4_ofld_send(skb->dev, skb);
}
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
u_ctx->lldi = *lld;
mutex_lock(&dev_mutex);
list_add_tail(&u_ctx->entry, &uld_ctx_list);
mutex_unlock(&dev_mutex);
out:
return u_ctx;
}
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl)
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_act_establish *rpl = (struct cpl_act_establish *)rsp;
if (rpl->ot.opcode != CPL_FW6_PLD) {
pr_err("Unsupported opcode\n");
return 0;
}
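	/* Small responses are delivered inline in the response descriptor
	 * (pgl is NULL); larger payloads arrive through the packet gather
	 * list and start at pgl->va.
	 */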
if (!pgl)
work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
else
work_handlers[rpl->ot.opcode](dev, pgl->va);
return 0;
}
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
int ret = 0;
switch (state) {
case CXGB4_STATE_UP:
if (!u_ctx->dev) {
ret = chcr_dev_add(u_ctx);
if (ret != 0)
return ret;
}
if (atomic_read(&dev_count) == 1)
ret = start_crypto();
break;
case CXGB4_STATE_DETACH:
if (u_ctx->dev) {
mutex_lock(&dev_mutex);
chcr_dev_remove(u_ctx);
mutex_unlock(&dev_mutex);
}
if (!atomic_read(&dev_count))
stop_crypto();
break;
case CXGB4_STATE_START_RECOVERY:
case CXGB4_STATE_DOWN:
default:
break;
}
return ret;
}
static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
		return -ENODEV;
	}
return 0;
}
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
if (atomic_read(&dev_count))
stop_crypto();
/* Remove all devices from list */
mutex_lock(&dev_mutex);
list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
if (u_ctx->dev)
chcr_dev_remove(u_ctx);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
}
module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);
MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __CHCR_CORE_H__
#define __CHCR_CORE_H__
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
#define DRV_VERSION "1.0.0.0"
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
#define PAD_ERROR_BIT 1
#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
struct uld_ctx;
struct chcr_dev {
	/* Requests submitted to h/w and waiting for a response. */
spinlock_t lock_chcr_dev;
struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
struct chcr_dev *dev;
};
int assign_chcr_device(struct chcr_dev **dev);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
#endif /* __CHCR_CORE_H__ */
/*
* This file is part of the Chelsio T6 Crypto driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
/* Define the following if the h/w does not drop the AAD and IV data
 * before handing back the processed data.
 */
#define CHCR_CRA_PRIORITY 300
#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
#define CHCR_GIVENCRYPT_OP 2
/* CPL/SCMD parameters */
#define CHCR_ENCRYPT_OP 0
#define CHCR_DECRYPT_OP 1
#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
#define CHCR_SCMD_CIPHER_MODE_NOP 0
#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
/* These are not really MAC key sizes; they are the sizes of the
 * intermediate values produced by the SHA engine.
 */
#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
#define CHCR_KEYCTX_NO_KEY 15
#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. flt #4 to #7 */
#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
#define flits_to_bytes(x) (x * 8)
#define IV_NOP 0
#define IV_IMMEDIATE 1
#define IV_DSGL 2
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
struct _key_ctx {
__be32 ctx_hdr;
u8 salt[MAX_SALT];
__be64 reserverd;
unsigned char key[0];
};
struct ablk_ctx {
u8 enc;
unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
unsigned int dst_nents;
struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
};
struct hmac_ctx {
struct shash_desc *desc;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
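/* Overlay for the per-transform state that follows struct chcr_context;
 * only one of the zero-length members is used for a given tfm type.
 */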
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
};
struct chcr_context {
struct chcr_dev *dev;
unsigned char tx_channel_id;
struct __crypto_ctx crypto_ctx[0];
};
struct chcr_ahash_req_ctx {
u32 result;
char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 bfr_len;
	/* The hardware DMAs the partial hash into this buffer */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
	u64 data_len;  /* Data length processed so far */
void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
};
struct chcr_alg_template {
u32 type;
u32 is_registered;
union {
struct crypto_alg crypto;
struct ahash_alg hash;
} alg;
};
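/* Illustrative sketch (hypothetical values, not part of the submitted
 * patches): a block-cipher entry of the driver's template table would be
 * populated roughly like this before being registered with the crypto API.
 */
static struct chcr_alg_template example_chcr_tmpl[] = {
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name	 = "cbc(aes)",
			.cra_driver_name = "cbc-aes-chcr",
			.cra_priority	 = CHCR_CRA_PRIORITY,
		},
	},
};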
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
struct sge_opaque_hdr {
void *dev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
struct chcr_context *ctx,
unsigned short qid,
unsigned short op_type);
#endif /* __CHCR_CRYPTO_H__ */
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
...@@ -53,6 +53,8 @@ ...@@ -53,6 +53,8 @@
#include "cxgb4_uld.h" #include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
enum { enum {
MAX_NPORTS = 4, /* max # of ports */ MAX_NPORTS = 4, /* max # of ports */
...@@ -338,6 +340,7 @@ struct adapter_params { ...@@ -338,6 +340,7 @@ struct adapter_params {
enum chip_type chip; /* chip code */ enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */ struct arch_specific_params arch; /* chip specific params */
unsigned char offload; unsigned char offload;
unsigned char crypto; /* HW capability for crypto */
unsigned char bypass; unsigned char bypass;
...@@ -403,7 +406,6 @@ struct fw_info { ...@@ -403,7 +406,6 @@ struct fw_info {
struct fw_hdr fw_hdr; struct fw_hdr fw_hdr;
}; };
struct trace_params { struct trace_params {
u32 data[TRACE_LEN / 4]; u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4];
...@@ -510,6 +512,10 @@ enum { /* adapter flags */ ...@@ -510,6 +512,10 @@ enum { /* adapter flags */
FW_OFLD_CONN = (1 << 9), FW_OFLD_CONN = (1 << 9),
}; };
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
};
struct rx_sw_desc; struct rx_sw_desc;
struct sge_fl { /* SGE free-buffer queue state */ struct sge_fl { /* SGE free-buffer queue state */
...@@ -680,6 +686,16 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ ...@@ -680,6 +686,16 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
u8 full; /* the Tx ring is full */ u8 full; /* the Tx ring is full */
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info {
char name[IFNAMSIZ]; /* name of ULD driver */
struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
u16 *msix_tbl; /* msix_tbl for uld */
u16 *rspq_id; /* response queue id's of rxq */
u16 nrxq; /* # of ingress uld queues */
u16 nciq; /* # of completion queues */
u8 uld; /* uld type */
};
struct sge { struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
...@@ -691,6 +707,7 @@ struct sge { ...@@ -691,6 +707,7 @@ struct sge {
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info **uld_rxq_info;
struct sge_rspq intrq ____cacheline_aligned_in_smp; struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock; spinlock_t intrq_lock;
...@@ -702,6 +719,7 @@ struct sge { ...@@ -702,6 +719,7 @@ struct sge {
u16 niscsitq; /* # of available iSCST Rx queues */ u16 niscsitq; /* # of available iSCST Rx queues */
u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 iscsi_rxq[MAX_OFLD_QSETS];
u16 iscsit_rxq[MAX_ISCSIT_QUEUES]; u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
u16 rdma_rxq[MAX_RDMA_QUEUES]; u16 rdma_rxq[MAX_RDMA_QUEUES];
...@@ -757,6 +775,17 @@ struct hash_mac_addr { ...@@ -757,6 +775,17 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN]; u8 addr[ETH_ALEN];
}; };
struct uld_msix_bmap {
unsigned long *msix_bmap;
unsigned int mapsize;
spinlock_t lock; /* lock for acquiring bitmap */
};
struct uld_msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
};
struct adapter { struct adapter {
void __iomem *regs; void __iomem *regs;
void __iomem *bar2; void __iomem *bar2;
...@@ -779,6 +808,9 @@ struct adapter { ...@@ -779,6 +808,9 @@ struct adapter {
unsigned short vec; unsigned short vec;
char desc[IFNAMSIZ + 10]; char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1]; } msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
unsigned int msi_idx;
struct doorbell_stats db_stats; struct doorbell_stats db_stats;
struct sge sge; struct sge sge;
...@@ -793,7 +825,9 @@ struct adapter { ...@@ -793,7 +825,9 @@ struct adapter {
unsigned int clipt_start; unsigned int clipt_start;
unsigned int clipt_end; unsigned int clipt_end;
struct clip_tbl *clipt; struct clip_tbl *clipt;
struct cxgb4_pci_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX]; void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
struct list_head list_node; struct list_head list_node;
struct list_head rcu_node; struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
...@@ -952,6 +986,11 @@ static inline int is_offload(const struct adapter *adap) ...@@ -952,6 +986,11 @@ static inline int is_offload(const struct adapter *adap)
return adap->params.offload; return adap->params.offload;
} }
static inline int is_pci_uld(const struct adapter *adap)
{
return adap->params.crypto;
}
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{ {
return readl(adap->regs + reg_addr); return readl(adap->regs + reg_addr);
...@@ -1185,8 +1224,6 @@ int t4_sge_init(struct adapter *adap); ...@@ -1185,8 +1224,6 @@ int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap); void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap); void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi); int cxgb_busy_poll(struct napi_struct *napi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
unsigned int cnt);
void cxgb4_set_ethtool_ops(struct net_device *netdev); void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh; extern int dbfifo_int_thresh;
...@@ -1289,6 +1326,18 @@ static inline int hash_mac_addr(const u8 *addr) ...@@ -1289,6 +1326,18 @@ static inline int hash_mac_addr(const u8 *addr)
return a & 0x3f; return a & 0x3f;
} }
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
unsigned int cnt);
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
q->adap = adap;
cxgb4_set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals, unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx); unsigned int nregs, unsigned int start_idx);
...@@ -1523,5 +1572,7 @@ void t4_idma_monitor(struct adapter *adapter, ...@@ -1523,5 +1572,7 @@ void t4_idma_monitor(struct adapter *adapter,
int hz, int ticks); int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr); unsigned int naddr, u8 *addr);
void uld_mem_free(struct adapter *adap);
int uld_mem_alloc(struct adapter *adap);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */ #endif /* __CXGB4_H__ */
...@@ -223,8 +223,8 @@ MODULE_PARM_DESC(select_queue, ...@@ -223,8 +223,8 @@ MODULE_PARM_DESC(select_queue,
static struct dentry *cxgb4_debugfs_root; static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list); LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex); DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */ /* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list); static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock); static DEFINE_SPINLOCK(adap_rcu_lock);
...@@ -1066,20 +1066,20 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, ...@@ -1066,20 +1066,20 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
*/ */
static int setup_sge_queues(struct adapter *adap) static int setup_sge_queues(struct adapter *adap)
{ {
int err, msi_idx, i, j; int err, i, j;
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & USING_MSIX) if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */ adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
else { else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
NULL, NULL, NULL, -1); NULL, NULL, NULL, -1);
if (err) if (err)
return err; return err;
msi_idx = -((int)s->intrq.abs_id + 1); adap->msi_idx = -((int)s->intrq.abs_id + 1);
} }
/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
...@@ -1096,7 +1096,7 @@ static int setup_sge_queues(struct adapter *adap) ...@@ -1096,7 +1096,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues. * new/deleted queues.
*/ */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler, NULL, -1); adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
if (err) { if (err) {
freeout: t4_free_sge_resources(adap); freeout: t4_free_sge_resources(adap);
return err; return err;
...@@ -1109,10 +1109,10 @@ freeout: t4_free_sge_resources(adap); ...@@ -1109,10 +1109,10 @@ freeout: t4_free_sge_resources(adap);
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) { for (j = 0; j < pi->nqsets; j++, q++) {
if (msi_idx > 0) if (adap->msi_idx > 0)
msi_idx++; adap->msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl, adap->msi_idx, &q->fl,
t4_ethrx_handler, t4_ethrx_handler,
NULL, NULL,
t4_get_mps_bg_map(adap, t4_get_mps_bg_map(adap,
...@@ -1141,11 +1141,11 @@ freeout: t4_free_sge_resources(adap); ...@@ -1141,11 +1141,11 @@ freeout: t4_free_sge_resources(adap);
} }
#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \ #define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \ err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \
if (err) \ if (err) \
goto freeout; \ goto freeout; \
if (msi_idx > 0) \ if (adap->msi_idx > 0) \
msi_idx += nq; \ adap->msi_idx += nq; \
} while (0) } while (0)
ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false); ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
...@@ -2565,6 +2565,12 @@ static void detach_ulds(struct adapter *adap) ...@@ -2565,6 +2565,12 @@ static void detach_ulds(struct adapter *adap)
CXGB4_STATE_DETACH); CXGB4_STATE_DETACH);
adap->uld_handle[i] = NULL; adap->uld_handle[i] = NULL;
} }
for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle) {
adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH);
adap->uld[i].handle = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) { if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb); unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false; netevent_registered = false;
...@@ -2584,6 +2590,10 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) ...@@ -2584,6 +2590,10 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
for (i = 0; i < CXGB4_ULD_MAX; i++) for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld_handle[i]) if (adap->uld_handle[i])
ulds[i].state_change(adap->uld_handle[i], new_state); ulds[i].state_change(adap->uld_handle[i], new_state);
for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle)
adap->uld[i].state_change(adap->uld[i].handle,
new_state);
mutex_unlock(&uld_mutex); mutex_unlock(&uld_mutex);
} }
...@@ -4170,6 +4180,11 @@ static int adap_init0(struct adapter *adap) ...@@ -4170,6 +4180,11 @@ static int adap_init0(struct adapter *adap)
adap->vres.iscsi.start = val[0]; adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1; adap->vres.iscsi.size = val[1] - val[0] + 1;
} }
if (caps_cmd.cryptocaps) {
/* Should query params here...TODO */
adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF #undef FW_PARAM_PFVF
#undef FW_PARAM_DEV #undef FW_PARAM_DEV
...@@ -4351,16 +4366,6 @@ static inline bool is_x_10g_port(const struct link_config *lc) ...@@ -4351,16 +4366,6 @@ static inline bool is_x_10g_port(const struct link_config *lc)
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0; (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
} }
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
q->adap = adap;
cxgb4_set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
/* /*
* Perform default configuration of DMA queues depending on the number and type * Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be * of ports we found and the number of available CPUs. Most settings can be
...@@ -4375,6 +4380,15 @@ static void cfg_queues(struct adapter *adap) ...@@ -4375,6 +4380,15 @@ static void cfg_queues(struct adapter *adap)
#endif #endif
int ciq_size; int ciq_size;
/* Reduce memory usage in kdump environment, disable all offload.
*/
if (is_kdump_kernel()) {
adap->params.offload = 0;
adap->params.crypto = 0;
} else if (adap->num_uld && uld_mem_alloc(adap)) {
adap->params.crypto = 0;
}
for_each_port(adap, i) for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB #ifdef CONFIG_CHELSIO_T4_DCB
...@@ -4405,11 +4419,6 @@ static void cfg_queues(struct adapter *adap) ...@@ -4405,11 +4419,6 @@ static void cfg_queues(struct adapter *adap)
if (q10g > netif_get_num_default_rss_queues()) if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues(); q10g = netif_get_num_default_rss_queues();
/* Reduce memory usage in kdump environment, disable all offload.
*/
if (is_kdump_kernel())
adap->params.offload = 0;
for_each_port(adap, i) { for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i); struct port_info *pi = adap2pinfo(adap, i);
...@@ -4538,23 +4547,58 @@ static void reduce_ethqs(struct adapter *adap, int n) ...@@ -4538,23 +4547,58 @@ static void reduce_ethqs(struct adapter *adap, int n)
} }
} }
static int get_msix_info(struct adapter *adap)
{
struct uld_msix_info *msix_info;
int max_ingq = (MAX_OFLD_QSETS * adap->num_uld);
msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
return -ENOMEM;
adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
sizeof(long), GFP_KERNEL);
if (!adap->msix_bmap_ulds.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
}
spin_lock_init(&adap->msix_bmap_ulds.lock);
adap->msix_info_ulds = msix_info;
return 0;
}
static void free_msix_info(struct adapter *adap)
{
if (!adap->num_uld)
return;
kfree(adap->msix_info_ulds);
kfree(adap->msix_bmap_ulds.msix_bmap);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2 #define EXTRA_VECS 2
static int enable_msix(struct adapter *adap) static int enable_msix(struct adapter *adap)
{ {
int ofld_need = 0; int ofld_need = 0, uld_need = 0;
int i, want, need, allocated; int i, j, want, need, allocated;
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports; unsigned int nchan = adap->params.nports;
struct msix_entry *entries; struct msix_entry *entries;
int max_ingq = MAX_INGQ;
entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
GFP_KERNEL); GFP_KERNEL);
if (!entries) if (!entries)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < MAX_INGQ + 1; ++i) /* map for msix */
if (is_pci_uld(adap) && get_msix_info(adap))
adap->params.crypto = 0;
for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i; entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS; want = s->max_ethqsets + EXTRA_VECS;
...@@ -4567,13 +4611,17 @@ static int enable_msix(struct adapter *adap) ...@@ -4567,13 +4611,17 @@ static int enable_msix(struct adapter *adap)
else else
ofld_need = 4 * nchan; ofld_need = 4 * nchan;
} }
if (is_pci_uld(adap)) {
want += netif_get_num_default_rss_queues() * nchan;
uld_need = nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB #ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port. * each port.
*/ */
need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else #else
need = adap->params.nports + EXTRA_VECS + ofld_need; need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif #endif
allocated = pci_enable_msix_range(adap->pdev, entries, need, want); allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) { if (allocated < 0) {
...@@ -4587,12 +4635,20 @@ static int enable_msix(struct adapter *adap) ...@@ -4587,12 +4635,20 @@ static int enable_msix(struct adapter *adap)
* Every group gets its minimum requirement and NIC gets top * Every group gets its minimum requirement and NIC gets top
* priority for leftovers. * priority for leftovers.
*/ */
i = allocated - EXTRA_VECS - ofld_need; i = allocated - EXTRA_VECS - ofld_need - uld_need;
if (i < s->max_ethqsets) { if (i < s->max_ethqsets) {
s->max_ethqsets = i; s->max_ethqsets = i;
if (i < s->ethqsets) if (i < s->ethqsets)
reduce_ethqs(adap, i); reduce_ethqs(adap, i);
} }
if (is_pci_uld(adap)) {
if (allocated < want)
s->nqs_per_uld = nchan;
else
s->nqs_per_uld = netif_get_num_default_rss_queues() *
nchan;
}
if (is_offload(adap)) { if (is_offload(adap)) {
if (allocated < want) { if (allocated < want) {
s->rdmaqs = nchan; s->rdmaqs = nchan;
...@@ -4605,15 +4661,23 @@ static int enable_msix(struct adapter *adap) ...@@ -4605,15 +4661,23 @@ static int enable_msix(struct adapter *adap)
/* leftovers go to OFLD */ /* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets - i = allocated - EXTRA_VECS - s->max_ethqsets -
s->rdmaqs - s->rdmaciqs - s->niscsitq; s->rdmaqs - s->rdmaciqs - s->niscsitq;
if (is_pci_uld(adap))
i -= s->nqs_per_uld * adap->num_uld;
s->iscsiqsets = (i / nchan) * nchan; /* round down */ s->iscsiqsets = (i / nchan) * nchan; /* round down */
} }
for (i = 0; i < allocated; ++i)
for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i)
adap->msix_info[i].vec = entries[i].vector; adap->msix_info[i].vec = entries[i].vector;
if (is_pci_uld(adap)) {
for (j = 0 ; i < allocated; ++i, j++)
adap->msix_info_ulds[j].vec = entries[i].vector;
adap->msix_bmap_ulds.mapsize = j;
}
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
"nic %d iscsi %d rdma cpl %d rdma ciq %d\n", "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n",
allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
s->rdmaciqs); s->rdmaciqs, s->nqs_per_uld);
kfree(entries); kfree(entries);
return 0; return 0;
...@@ -5215,8 +5279,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -5215,8 +5279,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */ /* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0) if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX; adapter->flags |= USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0) else if (msi > 0 && pci_enable_msi(pdev) == 0) {
adapter->flags |= USING_MSI; adapter->flags |= USING_MSI;
if (msi > 1)
free_msix_info(adapter);
}
/* check for PCI Express bandwidth capabiltites */ /* check for PCI Express bandwidth capabiltites */
cxgb4_check_pcie_caps(adapter); cxgb4_check_pcie_caps(adapter);
...@@ -5332,6 +5399,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -5332,6 +5399,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out_free_dev: out_free_dev:
free_some_resources(adapter); free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld)
uld_mem_free(adapter);
out_unmap_bar: out_unmap_bar:
if (!is_t4(adapter->params.chip)) if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2); iounmap(adapter->bar2);
...@@ -5393,6 +5464,10 @@ static void remove_one(struct pci_dev *pdev) ...@@ -5393,6 +5464,10 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->flags & FULL_INIT_DONE) if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter); cxgb_down(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld)
uld_mem_free(adapter);
free_some_resources(adapter); free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter); t4_cleanup_clip_tbl(adapter);
......
/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Written by: Atul Gupta (atul.gupta@chelsio.com)
* Written by: Hariprasad Shenai (hariprasad@chelsio.com)
*/
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
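/* Hand out a free MSI-X vector index from the pool reserved for PCI ULDs;
 * returns -ENOSPC once the bitmap is exhausted.
 */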
static int get_msix_idx_from_bmap(struct adapter *adap)
{
struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
unsigned long flags;
unsigned int msix_idx;
spin_lock_irqsave(&bmap->lock, flags);
msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
if (msix_idx < bmap->mapsize) {
__set_bit(msix_idx, bmap->msix_bmap);
} else {
spin_unlock_irqrestore(&bmap->lock, flags);
return -ENOSPC;
}
spin_unlock_irqrestore(&bmap->lock, flags);
return msix_idx;
}
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
unsigned long flags;
spin_lock_irqsave(&bmap->lock, flags);
__clear_bit(msix_idx, bmap->msix_bmap);
spin_unlock_irqrestore(&bmap->lock, flags);
}
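/* Deliver an ingress CPL message to the owning ULD: queues set up with a
 * flush handler (LRO) are dispatched through the ULD's lro_rx_handler so it
 * can aggregate, everything else goes to the plain rx_handler.
 */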
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct adapter *adap = q->adap;
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
int ret;
/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
rsp += 2;
if (q->flush_handler)
ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
rsp, gl, &q->lro_mgr,
&q->napi);
else
ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
rsp, gl);
if (ret) {
rxq->stats.nomem++;
return -1;
}
if (!gl)
rxq->stats.imm++;
else if (gl == CXGB4_MSG_AN)
rxq->stats.an++;
else
rxq->stats.pkts++;
return 0;
}
static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info,
unsigned int nq, unsigned int offset, bool lro)
{
struct sge *s = &adap->sge;
struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
unsigned short *ids = rxq_info->rspq_id + offset;
unsigned int per_chan = nq / adap->params.nports;
	int msi_idx, bmap_idx;
int i, err;
if (adap->flags & USING_MSIX)
msi_idx = 1;
else
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
adap->msi_idx++;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
adap->msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
NULL,
0);
if (err)
goto freeout;
if (msi_idx >= 0)
rxq_info->msix_tbl[i + offset] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
q = rxq_info->uldrxq + offset;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
adap->msi_idx--;
}
	/* We also need to free the previously allocated rxqs when the ciq
	 * allocation (the call with a non-zero offset) fails, since the
	 * caller does not clean them up in that case.
	 */
	if (offset) {
		q = rxq_info->uldrxq;
		for (i = offset; i; i--, q++) {
			if (q->rspq.desc)
				free_rspq_fl(adap, &q->rspq,
					     q->fl.size ? &q->fl : NULL);
			adap->msi_idx--;
		}
	}
return err;
}
int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc(rxq_info->nrxq + rxq_info->nciq,
					     sizeof(u16), GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
rxq_info->nrxq, lro));
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
struct sge_ofld_rxq *q)
{
for ( ; n; n--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
adap->msi_idx--;
}
}
void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
if (rxq_info->nciq)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
if (adap->flags & USING_MSIX)
kfree(rxq_info->msix_tbl);
}
int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
const struct cxgb4_pci_uld_info *uld_info)
{
struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info;
int i, nrxq;
rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
if (!rxq_info)
return -ENOMEM;
if (uld_info->nrxq > s->nqs_per_uld)
rxq_info->nrxq = s->nqs_per_uld;
else
rxq_info->nrxq = uld_info->nrxq;
	if (uld_info->nciq > s->nqs_per_uld)
		rxq_info->nciq = s->nqs_per_uld;
	else
		rxq_info->nciq = uld_info->nciq;
nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
GFP_KERNEL);
if (!rxq_info->uldrxq) {
kfree(rxq_info);
return -ENOMEM;
}
rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
kfree(rxq_info->uldrxq);
kfree(rxq_info);
return -ENOMEM;
}
for (i = 0; i < rxq_info->nrxq; i++) {
struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
r->rspq.uld = uld_type;
r->fl.size = 72;
}
for (i = rxq_info->nrxq; i < nrxq; i++) {
struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
r->rspq.uld = uld_type;
r->fl.size = 72;
}
	strlcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
adap->sge.uld_rxq_info[uld_type] = rxq_info;
return 0;
}
void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
kfree(rxq_info->rspq_id);
kfree(rxq_info->uldrxq);
kfree(rxq_info);
}
int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx, bmap_idx, err = 0;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
t4_sge_intr_msix, 0,
adap->msix_info_ulds[bmap_idx].desc,
&rxq_info->uldrxq[idx].rspq);
if (err)
goto unwind;
}
return 0;
unwind:
while (--idx >= 0) {
bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
&rxq_info->uldrxq[idx].rspq);
}
return err;
}
void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
for_each_uldrxq(rxq_info, idx) {
unsigned int bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
&rxq_info->uldrxq[idx].rspq);
}
}
void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc);
int idx;
for_each_uldrxq(rxq_info, idx) {
unsigned int bmap_idx = rxq_info->msix_tbl[idx];
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
}
}
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
if (!q)
return;
if (q->handler) {
cxgb_busy_poll_init_lock(q);
napi_enable(&q->napi);
}
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
INGRESSQID_V(q->cntxt_id));
}
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
if (q && q->handler) {
napi_disable(&q->napi);
local_bh_disable();
while (!cxgb_poll_lock_napi(q))
mdelay(1);
local_bh_enable();
}
}
void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
for_each_uldrxq(rxq_info, idx)
enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
for_each_uldrxq(rxq_info, idx)
quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
}
int uld_mem_alloc(struct adapter *adap)
{
struct sge *s = &adap->sge;
adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
if (!adap->uld)
return -ENOMEM;
s->uld_rxq_info = kzalloc(adap->num_uld *
sizeof(struct sge_uld_rxq_info *),
GFP_KERNEL);
if (!s->uld_rxq_info)
goto err_uld;
return 0;
err_uld:
kfree(adap->uld);
return -ENOMEM;
}
void uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
kfree(s->uld_rxq_info);
kfree(adap->uld);
}
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
int i;
lld->pdev = adap->pdev;
lld->pf = adap->pf;
lld->l2t = adap->l2t;
lld->tids = &adap->tids;
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
lld->ntxq = adap->sge.iscsiqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
lld->adapter_type = adap->params.chip;
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
lld->ucq_density = 1 << adap->params.sge.iq_qpp;
lld->filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lld->tx_modq[i] = i;
lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
lld->fw_vers = adap->params.fw_vers;
lld->dbfifo_int_thresh = dbfifo_int_thresh;
lld->sge_ingpadboundary = adap->sge.fl_align;
lld->sge_egrstatuspagesize = adap->sge.stat_len;
lld->sge_pktshift = adap->sge.pktshift;
lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
lld->max_ordird_qp = adap->params.max_ordird_qp;
lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
lld->nodeid = dev_to_node(adap->pdev_dev);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
uld_init(adap, &lli);
uld_queue_init(adap, uld, &lli);
handle = adap->uld[uld].add(&lli);
if (IS_ERR(handle)) {
dev_warn(adap->pdev_dev,
"could not attach to the %s driver, error %ld\n",
adap->uld[uld].name, PTR_ERR(handle));
return;
}
adap->uld[uld].handle = handle;
if (adap->flags & FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
struct cxgb4_pci_uld_info *p)
{
int ret = 0;
struct adapter *adap;
if (type >= CXGB4_PCI_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
if (!is_pci_uld(adap))
continue;
ret = cfg_queues_uld(adap, type, p);
if (ret)
goto out;
ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret)
goto free_queues;
if (adap->flags & USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
}
if (adap->flags & FULL_INIT_DONE)
enable_rx_uld(adap, type);
if (adap->uld[type].add) {
ret = -EBUSY;
goto free_irq;
}
adap->uld[type] = *p;
uld_attach(adap, type);
}
mutex_unlock(&uld_mutex);
return 0;
free_irq:
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
free_queues:
free_queues_uld(adap, type);
out:
mutex_unlock(&uld_mutex);
return ret;
}
EXPORT_SYMBOL(cxgb4_register_pci_uld);
int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
{
struct adapter *adap;
if (type >= CXGB4_PCI_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
if (!is_pci_uld(adap))
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
free_queues_uld(adap, type);
}
mutex_unlock(&uld_mutex);
return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
...@@ -32,8 +32,8 @@ ...@@ -32,8 +32,8 @@
* SOFTWARE. * SOFTWARE.
*/ */
#ifndef __CXGB4_OFLD_H #ifndef __CXGB4_ULD_H
#define __CXGB4_OFLD_H #define __CXGB4_ULD_H
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -296,8 +296,36 @@ struct cxgb4_uld_info { ...@@ -296,8 +296,36 @@ struct cxgb4_uld_info {
void (*lro_flush)(struct t4_lro_mgr *); void (*lro_flush)(struct t4_lro_mgr *);
}; };
enum cxgb4_pci_uld {
CXGB4_PCI_ULD1,
CXGB4_PCI_ULD_MAX
};
struct cxgb4_pci_uld_info {
const char *name;
bool lro;
void *handle;
unsigned int nrxq;
unsigned int nciq;
unsigned int rxq_size;
unsigned int ciq_size;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
int (*state_change)(void *handle, enum cxgb4_state new_state);
int (*control)(void *handle, enum cxgb4_control control, ...);
int (*lro_rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl,
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type); int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
struct cxgb4_pci_uld_info *p);
int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev); unsigned int cxgb4_port_chan(const struct net_device *dev);
...@@ -330,4 +358,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, ...@@ -330,4 +358,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset, u64 *pbar2_qoffset,
unsigned int *pbar2_qid); unsigned int *pbar2_qid);
#endif /* !__CXGB4_OFLD_H */ #endif /* !__CXGB4_ULD_H */
...@@ -2928,7 +2928,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q) ...@@ -2928,7 +2928,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
q->desc = NULL; q->desc = NULL;
} }
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
struct sge_fl *fl) struct sge_fl *fl)
{ {
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
......
...@@ -61,6 +61,7 @@ enum { ...@@ -61,6 +61,7 @@ enum {
CPL_ABORT_REQ_RSS = 0x2B, CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D, CPL_ABORT_RPL_RSS = 0x2D,
CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32, CPL_CLOSE_CON_RPL = 0x32,
CPL_ISCSI_HDR = 0x33, CPL_ISCSI_HDR = 0x33,
CPL_RDMA_CQE = 0x35, CPL_RDMA_CQE = 0x35,
...@@ -83,6 +84,10 @@ enum { ...@@ -83,6 +84,10 @@ enum {
CPL_PASS_OPEN_REQ6 = 0x81, CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83, CPL_ACT_OPEN_REQ6 = 0x83,
CPL_TX_TLS_PDU = 0x88,
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
CPL_RDMA_TERMINATE = 0xA2, CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4, CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5, CPL_SGE_EGR_UPDATE = 0xA5,
...@@ -94,6 +99,8 @@ enum { ...@@ -94,6 +99,8 @@ enum {
CPL_FW4_PLD = 0xC1, CPL_FW4_PLD = 0xC1,
CPL_FW4_ACK = 0xC3, CPL_FW4_ACK = 0xC3,
CPL_RX_PHYS_DSGL = 0xD0,
CPL_FW6_MSG = 0xE0, CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1, CPL_FW6_PLD = 0xE1,
CPL_TX_PKT_LSO = 0xED, CPL_TX_PKT_LSO = 0xED,
...@@ -1362,6 +1369,15 @@ struct ulptx_idata { ...@@ -1362,6 +1369,15 @@ struct ulptx_idata {
__be32 len; __be32 len;
}; };
struct ulp_txpkt {
__be32 cmd_dest;
__be32 len;
};
#define ULPTX_CMD_S 24
#define ULPTX_CMD_M 0xFF
#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
#define ULPTX_NSGE_S 0 #define ULPTX_NSGE_S 0
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
...@@ -1369,6 +1385,22 @@ struct ulptx_idata { ...@@ -1369,6 +1385,22 @@ struct ulptx_idata {
#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S) #define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
#define ULPTX_MORE_F ULPTX_MORE_V(1U) #define ULPTX_MORE_F ULPTX_MORE_V(1U)
#define ULP_TXPKT_DEST_S 16
#define ULP_TXPKT_DEST_M 0x3
#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S)
#define ULP_TXPKT_FID_S 4
#define ULP_TXPKT_FID_M 0x7ff
#define ULP_TXPKT_FID_V(x) ((x) << ULP_TXPKT_FID_S)
#define ULP_TXPKT_RO_S 3
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
...@@ -1406,4 +1438,409 @@ struct ulp_mem_io {
#define ULP_MEMIO_DATA_LEN_S 0
#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_M 0xFFFF
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M)
struct ulptx_sc_memrd {
__be32 cmd_to_len;
__be32 addr;
};
#define ULP_TXPKT_DATAMODIFY_S 23
#define ULP_TXPKT_DATAMODIFY_M 0x1
#define ULP_TXPKT_DATAMODIFY_V(x) ((x) << ULP_TXPKT_DATAMODIFY_S)
#define ULP_TXPKT_DATAMODIFY_G(x) \
(((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY_M)
#define ULP_TXPKT_DATAMODIFY_F ULP_TXPKT_DATAMODIFY_V(1U)
#define ULP_TXPKT_CHANNELID_S 22
#define ULP_TXPKT_CHANNELID_M 0x1
#define ULP_TXPKT_CHANNELID_V(x) ((x) << ULP_TXPKT_CHANNELID_S)
#define ULP_TXPKT_CHANNELID_G(x) \
(((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M)
#define ULP_TXPKT_CHANNELID_F ULP_TXPKT_CHANNELID_V(1U)
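/*
 * Editor's sketch (not part of this patch): how the ULP_TXPKT fields above
 * pack into ulp_txpkt.cmd_dest.  ULP_TX_PKT is assumed to be the ULP TX
 * packet opcode defined elsewhere in this header, and the chosen field
 * values are purely illustrative.
 */
static inline void demo_fill_ulp_txpkt(struct ulp_txpkt *pkt,
				       unsigned int chan_id,
				       unsigned int len16)
{
	pkt->cmd_dest = cpu_to_be32(ULPTX_CMD_V(ULP_TX_PKT) |
				    ULP_TXPKT_DATAMODIFY_V(0) |
				    ULP_TXPKT_CHANNELID_V(chan_id) |
				    ULP_TXPKT_DEST_V(0) |
				    ULP_TXPKT_FID_V(0) |
				    ULP_TXPKT_RO_F);	/* relaxed ordering */
	pkt->len = cpu_to_be32(len16);	/* request length, assumed in 16-byte units */
}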
#define SCMD_SEQ_NO_CTRL_S 29
#define SCMD_SEQ_NO_CTRL_M 0x3
#define SCMD_SEQ_NO_CTRL_V(x) ((x) << SCMD_SEQ_NO_CTRL_S)
#define SCMD_SEQ_NO_CTRL_G(x) \
(((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M)
/* StsFieldPrsnt- Status field at the end of the TLS PDU */
#define SCMD_STATUS_PRESENT_S 28
#define SCMD_STATUS_PRESENT_M 0x1
#define SCMD_STATUS_PRESENT_V(x) ((x) << SCMD_STATUS_PRESENT_S)
#define SCMD_STATUS_PRESENT_G(x) \
(((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M)
#define SCMD_STATUS_PRESENT_F SCMD_STATUS_PRESENT_V(1U)
/* ProtoVersion - Protocol Version. 0: 1.2, 1: 1.1, 2: DTLS, 3: Generic,
 * 4-15: Reserved.
 */
#define SCMD_PROTO_VERSION_S 24
#define SCMD_PROTO_VERSION_M 0xf
#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S)
#define SCMD_PROTO_VERSION_G(x) \
(((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M)
/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
#define SCMD_ENC_DEC_CTRL_S 23
#define SCMD_ENC_DEC_CTRL_M 0x1
#define SCMD_ENC_DEC_CTRL_V(x) ((x) << SCMD_ENC_DEC_CTRL_S)
#define SCMD_ENC_DEC_CTRL_G(x) \
(((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M)
#define SCMD_ENC_DEC_CTRL_F SCMD_ENC_DEC_CTRL_V(1U)
/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
#define SCMD_CIPH_AUTH_SEQ_CTRL_S 22
#define SCMD_CIPH_AUTH_SEQ_CTRL_M 0x1
#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x) \
((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S)
#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x) \
(((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M)
#define SCMD_CIPH_AUTH_SEQ_CTRL_F SCMD_CIPH_AUTH_SEQ_CTRL_V(1U)
/* CiphMode - Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR,
* 4:Generic-AES, 5-15: Reserved.
*/
#define SCMD_CIPH_MODE_S 18
#define SCMD_CIPH_MODE_M 0xf
#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S)
#define SCMD_CIPH_MODE_G(x) \
(((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M)
/* AuthMode - Auth Mode. 0: NOP, 1:SHA1, 2:SHA2-224, 3:SHA2-256
* 4-15: Reserved
*/
#define SCMD_AUTH_MODE_S 14
#define SCMD_AUTH_MODE_M 0xf
#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S)
#define SCMD_AUTH_MODE_G(x) \
(((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M)
/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation
* per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved
*/
#define SCMD_HMAC_CTRL_S 11
#define SCMD_HMAC_CTRL_M 0x7
#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S)
#define SCMD_HMAC_CTRL_G(x) \
(((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M)
/* IvSize - IV size in units of 2 bytes */
#define SCMD_IV_SIZE_S 7
#define SCMD_IV_SIZE_M 0xf
#define SCMD_IV_SIZE_V(x) ((x) << SCMD_IV_SIZE_S)
#define SCMD_IV_SIZE_G(x) \
(((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M)
/* NumIVs - Number of IVs */
#define SCMD_NUM_IVS_S 0
#define SCMD_NUM_IVS_M 0x7f
#define SCMD_NUM_IVS_V(x) ((x) << SCMD_NUM_IVS_S)
#define SCMD_NUM_IVS_G(x) \
(((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M)
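/*
 * Editor's sketch (not part of this patch): the SCMD fields above pack into
 * one 32-bit control word (used as cpl_tx_sec_pdu.seqno_numivs further down
 * in this header).  The specific values chosen here are illustrative only.
 */
static inline __be32 demo_scmd_seqno_numivs(unsigned int ciph_mode,
					    unsigned int auth_mode,
					    unsigned int hmac_ctrl,
					    unsigned int iv_size)
{
	return cpu_to_be32(SCMD_SEQ_NO_CTRL_V(0) |
			   SCMD_PROTO_VERSION_V(3) |	/* 3: Generic */
			   SCMD_ENC_DEC_CTRL_V(0) |	/* 0: Encrypt */
			   SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
			   SCMD_CIPH_MODE_V(ciph_mode) |
			   SCMD_AUTH_MODE_V(auth_mode) |
			   SCMD_HMAC_CTRL_V(hmac_ctrl) |
			   SCMD_IV_SIZE_V(iv_size) |	/* IV size in 2-byte units */
			   SCMD_NUM_IVS_V(0));
}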
/* EnbDbgId - If this is enabled, the upper 20 bits (63:44) of SeqNumber
 * (below) are used as the Cid (connection id for debug status); these
 * bits are padded with zeroes when forming the 64-bit
 * sequence number for TLS.
 */
#define SCMD_ENB_DBGID_S 31
#define SCMD_ENB_DBGID_M 0x1
#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S)
#define SCMD_ENB_DBGID_G(x) \
(((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M)
/* IV generation in SW. */
#define SCMD_IV_GEN_CTRL_S 30
#define SCMD_IV_GEN_CTRL_M 0x1
#define SCMD_IV_GEN_CTRL_V(x) ((x) << SCMD_IV_GEN_CTRL_S)
#define SCMD_IV_GEN_CTRL_G(x) \
(((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M)
#define SCMD_IV_GEN_CTRL_F SCMD_IV_GEN_CTRL_V(1U)
/* More frags */
#define SCMD_MORE_FRAGS_S 20
#define SCMD_MORE_FRAGS_M 0x1
#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S)
#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M)
/* last frag */
#define SCMD_LAST_FRAG_S 19
#define SCMD_LAST_FRAG_M 0x1
#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S)
#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M)
/* TlsCompPdu */
#define SCMD_TLS_COMPPDU_S 18
#define SCMD_TLS_COMPPDU_M 0x1
#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S)
#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M)
/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly */
#define SCMD_KEY_CTX_INLINE_S 17
#define SCMD_KEY_CTX_INLINE_M 0x1
#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S)
#define SCMD_KEY_CTX_INLINE_G(x) \
(((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M)
#define SCMD_KEY_CTX_INLINE_F SCMD_KEY_CTX_INLINE_V(1U)
/* TLSFragEnable - 0: Host-created TLS PDUs, 1: TLS fragmentation in ASIC */
#define SCMD_TLS_FRAG_ENABLE_S 16
#define SCMD_TLS_FRAG_ENABLE_M 0x1
#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S)
#define SCMD_TLS_FRAG_ENABLE_G(x) \
(((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M)
#define SCMD_TLS_FRAG_ENABLE_F SCMD_TLS_FRAG_ENABLE_V(1U)
/* MacOnly - Only send the MAC and discard the PDU. This is valid for
 * hash-only modes; in this case TLS_TX will drop the PDU and only
 * send back the MAC bytes.
 */
#define SCMD_MAC_ONLY_S 15
#define SCMD_MAC_ONLY_M 0x1
#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S)
#define SCMD_MAC_ONLY_G(x) \
(((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M)
#define SCMD_MAC_ONLY_F SCMD_MAC_ONLY_V(1U)
/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols
 * which have complex AAD and IV formats, e.g. AES-CCM.
 */
#define SCMD_AADIVDROP_S 14
#define SCMD_AADIVDROP_M 0x1
#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S)
#define SCMD_AADIVDROP_G(x) \
(((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M)
#define SCMD_AADIVDROP_F SCMD_AADIVDROP_V(1U)
/* HdrLength - Length of all headers excluding TLS header
* present before start of crypto PDU/payload.
*/
#define SCMD_HDR_LEN_S 0
#define SCMD_HDR_LEN_M 0x3fff
#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S)
#define SCMD_HDR_LEN_G(x) \
(((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M)
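/*
 * Editor's sketch (not part of this patch): the second SCMD control word
 * (cpl_tx_sec_pdu.ivgen_hdrlen below), again with illustrative values only.
 */
static inline __be32 demo_scmd_ivgen_hdrlen(unsigned int hdr_len)
{
	return cpu_to_be32(SCMD_IV_GEN_CTRL_V(0) |	/* IVs supplied by host */
			   SCMD_MORE_FRAGS_V(0) |
			   SCMD_LAST_FRAG_V(0) |
			   SCMD_TLS_COMPPDU_V(0) |
			   SCMD_KEY_CTX_INLINE_V(1) |	/* key context inline after scmd */
			   SCMD_TLS_FRAG_ENABLE_V(0) |
			   SCMD_MAC_ONLY_V(0) |
			   SCMD_AADIVDROP_V(0) |
			   SCMD_HDR_LEN_V(hdr_len));
}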
struct cpl_tx_sec_pdu {
__be32 op_ivinsrtofst;
__be32 pldlen;
__be32 aadstart_cipherstop_hi;
__be32 cipherstop_lo_authinsert;
__be32 seqno_numivs;
__be32 ivgen_hdrlen;
__be64 scmd1;
};
#define CPL_TX_SEC_PDU_OPCODE_S 24
#define CPL_TX_SEC_PDU_OPCODE_M 0xff
#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S)
#define CPL_TX_SEC_PDU_OPCODE_G(x) \
(((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M)
/* RX Channel Id */
#define CPL_TX_SEC_PDU_RXCHID_S 22
#define CPL_TX_SEC_PDU_RXCHID_M 0x1
#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S)
#define CPL_TX_SEC_PDU_RXCHID_G(x) \
(((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M)
#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U)
/* Ack Follows */
#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21
#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1
#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S)
#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \
(((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M)
#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U)
/* Loopback bit in cpl_tx_sec_pdu */
#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20
#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1
#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S)
#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \
(((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M)
#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U)
/* Length of cpl header encapsulated */
#define CPL_TX_SEC_PDU_CPLLEN_S 16
#define CPL_TX_SEC_PDU_CPLLEN_M 0xf
#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S)
#define CPL_TX_SEC_PDU_CPLLEN_G(x) \
(((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M)
/* PlaceHolder */
#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10
#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1
#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S)
#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \
(((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \
CPL_TX_SEC_PDU_PLACEHOLDER_M)
/* IvInsrtOffset: Insertion location for IV */
#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0
#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff
#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S)
#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \
(((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \
CPL_TX_SEC_PDU_IVINSRTOFST_M)
/* AadStartOffset: Offset in bytes for AAD start from
* the first byte following the pkt headers (0-255 bytes)
*/
#define CPL_TX_SEC_PDU_AADSTART_S 24
#define CPL_TX_SEC_PDU_AADSTART_M 0xff
#define CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S)
#define CPL_TX_SEC_PDU_AADSTART_G(x) \
(((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \
CPL_TX_SEC_PDU_AADSTART_M)
/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following
* the pkt headers (0-511 bytes)
*/
#define CPL_TX_SEC_PDU_AADSTOP_S 15
#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff
#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S)
#define CPL_TX_SEC_PDU_AADSTOP_G(x) \
(((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M)
/* CipherStartOffset: offset in bytes for encryption/decryption start from the
* first byte following the pkt headers (0-1023 bytes)
*/
#define CPL_TX_SEC_PDU_CIPHERSTART_S 5
#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff
#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S)
#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \
(((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \
CPL_TX_SEC_PDU_CIPHERSTART_M)
/* CipherStopOffset: offset in bytes for encryption/decryption end
* from end of the payload of this command (0-511 bytes)
*/
#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0
#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f
#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \
((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S)
#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \
(((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \
CPL_TX_SEC_PDU_CIPHERSTOP_HI_M)
#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28
#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf
#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \
((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S)
#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \
(((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \
CPL_TX_SEC_PDU_CIPHERSTOP_LO_M)
/* AuthStartOffset: offset in bytes for authentication start from
* the first byte following the pkt headers (0-1023)
*/
#define CPL_TX_SEC_PDU_AUTHSTART_S 18
#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff
#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S)
#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \
(((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \
CPL_TX_SEC_PDU_AUTHSTART_M)
/* AuthStopOffset: offset in bytes for authentication
* end from end of the payload of this command (0-511 Bytes)
*/
#define CPL_TX_SEC_PDU_AUTHSTOP_S 9
#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff
#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S)
#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \
(((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \
CPL_TX_SEC_PDU_AUTHSTOP_M)
/* AuthInsrtOffset: offset in bytes for authentication insertion
* from end of the payload of this command (0-511 bytes)
*/
#define CPL_TX_SEC_PDU_AUTHINSERT_S 0
#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff
#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S)
#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \
(((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \
CPL_TX_SEC_PDU_AUTHINSERT_M)
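/*
 * Editor's sketch (not part of this patch): filling the offset words of
 * cpl_tx_sec_pdu for a request whose AAD, cipher and auth regions are given
 * as byte offsets.  The constant choices (CPLLEN of 2, loopback enabled,
 * zero stop offsets) are illustrative; the real driver derives every field
 * per algorithm and per request.
 */
static void demo_fill_sec_pdu(struct cpl_tx_sec_pdu *pdu, unsigned int rx_chid,
			      unsigned int iv_ofst, unsigned int aad_start,
			      unsigned int aad_stop, unsigned int ciph_start,
			      unsigned int auth_start, unsigned int auth_insert)
{
	pdu->op_ivinsrtofst =
		cpu_to_be32(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
			    CPL_TX_SEC_PDU_RXCHID_V(rx_chid) |
			    CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) |
			    CPL_TX_SEC_PDU_ULPTXLPBK_V(1) |
			    CPL_TX_SEC_PDU_CPLLEN_V(2) |
			    CPL_TX_SEC_PDU_PLACEHOLDER_V(0) |
			    CPL_TX_SEC_PDU_IVINSRTOFST_V(iv_ofst));

	pdu->aadstart_cipherstop_hi =
		cpu_to_be32(CPL_TX_SEC_PDU_AADSTART_V(aad_start) |
			    CPL_TX_SEC_PDU_AADSTOP_V(aad_stop) |
			    CPL_TX_SEC_PDU_CIPHERSTART_V(ciph_start) |
			    CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(0));

	pdu->cipherstop_lo_authinsert =
		cpu_to_be32(CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(0) |
			    CPL_TX_SEC_PDU_AUTHSTART_V(auth_start) |
			    CPL_TX_SEC_PDU_AUTHSTOP_V(0) |
			    CPL_TX_SEC_PDU_AUTHINSERT_V(auth_insert));
}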
struct cpl_rx_phys_dsgl {
__be32 op_to_tid;
__be32 pcirlxorder_to_noofsgentr;
struct rss_header rss_hdr_int;
};
#define CPL_RX_PHYS_DSGL_OPCODE_S 24
#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff
#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S)
#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M)
#define CPL_RX_PHYS_DSGL_ISRDMA_S 23
#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1
#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S)
#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M)
#define CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U)
#define CPL_RX_PHYS_DSGL_RSVD1_S 20
#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7
#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S)
#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \
CPL_RX_PHYS_DSGL_RSVD1_M)
#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31
#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1
#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \
((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S)
#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \
CPL_RX_PHYS_DSGL_PCIRLXORDER_M)
#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U)
#define CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30
#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1
#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \
((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S)
#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \
CPL_RX_PHYS_DSGL_PCINOSNOOP_M)
#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U)
#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29
#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1
#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \
((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S)
#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \
CPL_RX_PHYS_DSGL_PCITPHNTENB_M)
#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U)
#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27
#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3
#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S)
#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \
CPL_RX_PHYS_DSGL_PCITPHNT_M)
#define CPL_RX_PHYS_DSGL_DCAID_S 16
#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff
#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S)
#define CPL_RX_PHYS_DSGL_DCAID_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \
CPL_RX_PHYS_DSGL_DCAID_M)
#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0
#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff
#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \
((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S)
#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \
(((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \
CPL_RX_PHYS_DSGL_NOOFSGENTR_M)
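/*
 * Editor's sketch (not part of this patch): composing the cpl_rx_phys_dsgl
 * control words for a destination scatter list with nsge entries.  All PCI
 * attribute bits are left at zero here purely for illustration.
 */
static void demo_fill_rx_phys_dsgl(struct cpl_rx_phys_dsgl *dsgl,
				   unsigned int nsge)
{
	dsgl->op_to_tid =
		cpu_to_be32(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) |
			    CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	dsgl->pcirlxorder_to_noofsgentr =
		cpu_to_be32(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
			    CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
			    CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
			    CPL_RX_PHYS_DSGL_DCAID_V(0) |
			    CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nsge));
}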
#endif /* __T4_MSG_H */
...@@ -102,6 +102,7 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
FW_CRYPTO_LOOKASIDE_WR = 0x6d,
FW_LASTC2E_WR = 0x70
};
...@@ -1060,7 +1061,7 @@ struct fw_caps_config_cmd {
__be16 niccaps;
__be16 ofldcaps;
__be16 rdmacaps;
__be16 cryptocaps;
__be16 iscsicaps;
__be16 fcoecaps;
__be32 cfcsum;
...@@ -3249,4 +3250,127 @@ struct fw_devlog_cmd {
#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr))
struct fw_crypto_lookaside_wr {
__be32 op_to_cctx_size;
__be32 len16_pkd;
__be32 session_id;
__be32 rx_chid_to_rx_q_id;
__be32 key_addr;
__be32 pld_size_hash_size;
__be64 cookie;
};
#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24
#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff
#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S)
#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_M)
#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23
#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1
#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S)
#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \
FW_CRYPTO_LOOKASIDE_WR_COMPL_M)
#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U)
#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15
#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff
#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S)
#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \
FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M)
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S)
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \
FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M)
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S)
#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \
FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M)
#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0
#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff
#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S)
#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \
FW_CRYPTO_LOOKASIDE_WR_LEN16_M)
#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29
#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S)
#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M)
#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27
#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S)
#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M)
#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25
#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S)
#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \
FW_CRYPTO_LOOKASIDE_WR_PHASH_M)
#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23
#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S)
#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M)
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S)
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \
FW_CRYPTO_LOOKASIDE_WR_TX_CH_M)
#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0
#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff
#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S)
#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M)
#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24
#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff
#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S)
#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \
FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M)
#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17
#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f
#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \
((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S)
#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M)
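/*
 * Editor's sketch (not part of this patch): composing the work-request
 * header fw_crypto_lookaside_wr that fronts a crypto request.  The CCTX
 * location/size values and the zeroed LCB/PHASH/TX_CH fields are
 * illustrative assumptions; the real driver computes every field per
 * request.
 */
static void demo_fill_crypto_wr(struct fw_crypto_lookaside_wr *wr,
				unsigned int len16, unsigned int imm_len,
				unsigned int rx_chid, unsigned int rx_qid,
				unsigned int pld_size, unsigned int hash_size)
{
	wr->op_to_cctx_size =
		cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(FW_CRYPTO_LOOKASIDE_WR) |
			    FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) |
			    FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(imm_len) |
			    FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) |
			    FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(2));
	wr->len16_pkd = cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(len16));
	wr->rx_chid_to_rx_q_id =
		cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(rx_chid) |
			    FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) |
			    FW_CRYPTO_LOOKASIDE_WR_PHASH_V(0) |
			    FW_CRYPTO_LOOKASIDE_WR_IV_V(1) |
			    FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(0) |
			    FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(rx_qid));
	wr->pld_size_hash_size =
		cpu_to_be32(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(pld_size) |
			    FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_size));
}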
#endif /* _T4FW_INTERFACE_H_ */