Commit 396bf4cd authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - use-after-free in algif_aead

 - aesni regression when pcbc is built as a module but absent

 - bug causing IO page faults in ccp

 - double list add in ccp

 - NULL pointer dereference in qat (two patches)

 - panic in chcr

 - NULL pointer dereference in chcr

 - out-of-bounds access in chcr

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: chcr - Fix key length for RFC4106
  crypto: algif_aead - Fix kernel panic on list_del
  crypto: aesni - Fix failure when pcbc module is absent
  crypto: ccp - Fix double add when creating new DMA command
  crypto: ccp - Fix DMA operations when IOMMU is enabled
  crypto: chcr - Check device is allocated before use
  crypto: chcr - Fix panic on dma_unmap_sg
  crypto: qat - zero esram only for DH85x devices
  crypto: qat - fix bar discovery for c62x
parents d5adbfcd 7c2cf1c4
arch/x86/crypto/aesni-intel_glue.c

@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
                     aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);

-       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-                   aesni_simd_skciphers2[i].simd; i++)
-               simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+               if (aesni_simd_skciphers2[i].simd)
+                       simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }

 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
-                       goto unregister_simds;
+                       continue;

                aesni_simd_skciphers2[i].simd = simd;
        }
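The two hunks work together: once registration may `continue` past a failed slot (for example, when the pcbc template is modular but not loaded), the aesni_simd_skciphers2 array can contain NULL holes, so the teardown loop must scan the whole array instead of stopping at the first NULL. A minimal, self-contained sketch of that pattern, with hypothetical names standing in for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct simd_cipher { const char *name; };

static struct simd_cipher a = { "a" }, c = { "c" };

/* A sparse registration table: slot 1 failed (its template was absent). */
static struct simd_cipher *table[4] = { &a, NULL, &c, NULL };

static void free_all(void)
{
        size_t i;

        /* Buggy variant: `for (i = 0; i < ARRAY_SIZE(table) && table[i]; i++)`
         * stops at the hole in slot 1 and never releases slot 2.  Scanning
         * the full array and skipping holes releases every live entry. */
        for (i = 0; i < ARRAY_SIZE(table); i++)
                if (table[i])
                        printf("freeing %s\n", table[i]->name);
}

int main(void) { free_all(); return 0; }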
crypto/algif_aead.c

@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
+               list_del(&rsgl->list);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-               list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
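The old order freed rsgl with sock_kfree_s() and then touched the freed node in list_del(), a use-after-free that is the panic named in the pull summary; the fix unlinks first, while the node is still valid. A minimal sketch of the unlink-before-free rule, with plain malloc/free standing in for sock_kfree_s():

#include <stdlib.h>

struct node { struct node *prev, *next; };

/* Equivalent of list_del(): dereferences the node being removed. */
static void unlink(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

int main(void)
{
        struct node head = { &head, &head };
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return 1;
        /* insert n after head */
        n->prev = &head; n->next = head.next;
        head.next->prev = n; head.next = n;

        unlink(n);   /* correct: unlink first, while n is valid memory */
        free(n);     /* freeing first and unlinking after reads freed
                        memory: the list_del()-after-free bug fixed above */
        return 0;
}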
drivers/crypto/ccp/ccp-dev-v5.c

@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
        /* Public side */
-       iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+       iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }

 static void ccp5other_config(struct ccp_device *ccp)
drivers/crypto/ccp/ccp-dev.h

@@ -238,6 +238,7 @@ struct ccp_dma_chan {
        struct ccp_device *ccp;

        spinlock_t lock;
+       struct list_head created;
        struct list_head pending;
        struct list_head active;
        struct list_head complete;
drivers/crypto/ccp/ccp-dmaengine.c

@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
        spin_lock_irqsave(&chan->lock, flags);
        cookie = dma_cookie_assign(tx_desc);
+       list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
        spin_lock_irqsave(&chan->lock, sflags);
-       list_add_tail(&desc->entry, &chan->pending);
+       list_add_tail(&desc->entry, &chan->created);

        spin_unlock_irqrestore(&chan->lock, sflags);
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
+               INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);
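Taken together, these hunks fix the double list add: a fresh descriptor now goes on the new `created` list, and ccp_tx_submit() *moves* it (list_del, then list_add_tail) onto `pending`, so a node can never sit on `pending` twice. Adding a node that is already linked, without unlinking it first, corrupts both lists. A minimal sketch of the delete-then-add move, with a hand-rolled list standing in for the kernel's list_head helpers:

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

int main(void)
{
        struct list_head created, pending, desc;

        list_init(&created);
        list_init(&pending);

        list_add_tail(&desc, &created);   /* ccp_create_desc(): park it   */
        list_del(&desc);                  /* ccp_tx_submit(): unlink first */
        list_add_tail(&desc, &pending);   /* ...then add: a safe "move"   */
        printf("moved without a double add\n");
        return 0;
}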
drivers/crypto/chelsio/chcr_algo.c

@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
        case CRYPTO_ALG_TYPE_AEAD:
                ctx_req.req.aead_req = (struct aead_request *)req;
                ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
                             ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
                if (ctx_req.ctx.reqctx->skb) {
                        kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
        unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
                null = 1;
                assoclen = 0;
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                 &sg_param))
                goto dstmap_fail;
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
        unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
        sub_type = get_aead_subtype(tfm);
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err) {
                        pr_err("AAD copy to destination buffer fails\n");
                        return ERR_PTR(err);
                }
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                 &sg_param))
                goto dstmap_fail;
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }

        if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                crypt_len = AES_BLOCK_SIZE;
        else
                crypt_len = req->cryptlen;
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                 &sg_param))
                goto dstmap_fail;
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                write_sg_to_skb(skb, &frags, src, req->cryptlen);
        } else {
                aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-               write_sg_to_skb(skb, &frags, dst, crypt_len);
+               write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
        }

        create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
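All of the hunks above make one change: the forwarded destination scatterlist used to live in on-stack arrays (src_sg/dst_sg) of the submit function, while the completion path in chcr_handle_resp() unmapped req->dst, a different scatterlist from the one actually mapped; the fix keeps both the pointer and its backing storage in the per-request context so submit and completion operate on the same object. A condensed sketch of the pattern, with hypothetical types standing in for the kernel scatterlist machinery:

struct sg_entry { void *addr; unsigned int len; };

struct req_ctx {
        /* Lives as long as the request, so the completion handler can
         * undo exactly the mapping made at submit time. */
        struct sg_entry *dst;        /* what was really DMA-mapped */
        struct sg_entry dstffwd[2];  /* backing storage for `dst`  */
        int dst_nents;
};

static void submit(struct req_ctx *ctx, struct sg_entry *user_dst, int nents)
{
        /* BAD:  struct sg_entry local[2]; ctx->dst = local;
         * `local` dies on return, long before completion runs. */
        ctx->dstffwd[0] = user_dst[0];  /* stand-in for scatterwalk_ffwd() */
        ctx->dst = ctx->dstffwd;
        ctx->dst_nents = nents;
        /* ...map ctx->dst for DMA and post the request to the device... */
}

static void complete(struct req_ctx *ctx)
{
        /* Unmap the very scatterlist that was mapped, i.e. the kernel's
         * dma_unmap_sg(dev, ctx->dst, ctx->dst_nents, ...), not the
         * caller's original destination list. */
        (void)ctx;
}

int main(void)
{
        struct sg_entry user[1] = { { 0, 0 } };
        struct req_ctx ctx;

        submit(&ctx, user, 1);
        complete(&ctx);
        return 0;
}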
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
        unsigned int ck_size;
        int ret = 0, key_ctx_size = 0;

-       if (get_aead_subtype(aead) ==
-           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+       if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+           keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(aeadctx->salt, key + keylen, 4);
        }
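The `keylen > 3` guard matters because keylen is unsigned: for a malformed RFC4106 key shorter than 4 bytes, `keylen -= 4` would wrap around to a huge value, and the following `key + keylen` read would run far out of bounds (the out-of-bounds access listed in the pull summary). A tiny standalone demonstration of the wraparound:

#include <stdio.h>

int main(void)
{
        unsigned int keylen = 3;            /* malformed short key */

        /* Unguarded: 3 - 4 wraps around instead of going negative. */
        unsigned int wrapped = keylen - 4;
        printf("3 - 4 as unsigned = %u\n", wrapped);   /* 4294967295 */

        /* Guarded, as in the fix: only strip the 4-byte nonce/salt
         * when there is actually room for it. */
        if (keylen > 3)
                keylen -= 4;
        printf("guarded keylen = %u\n", keylen);       /* still 3 */
        return 0;
}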
drivers/crypto/chelsio/chcr_core.c

@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
        struct uld_ctx *u_ctx;
+       int ret = -ENXIO;

        /*
         * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
         * must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
-       u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-       if (!u_ctx) {
-               mutex_unlock(&dev_mutex);
-               return -ENXIO;
-       }
-       *dev = u_ctx->dev;
+       list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+               if (u_ctx && u_ctx->dev) {
+                       *dev = u_ctx->dev;
+                       ret = 0;
+                       break;
+               }
        mutex_unlock(&dev_mutex);
-       return 0;
+       return ret;
 }
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 static int __init chcr_crypto_init(void)
 {
-       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
                pr_err("ULD register fail: No chcr crypto support in cxgb4");
-               return -1;
-       }

        return 0;
 }
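The removed NULL check was dead code: list_first_entry() is pure pointer arithmetic, so on an empty list it returns a bogus non-NULL pointer computed from the list head itself, the `!u_ctx` test never fires, and the subsequent dereference crashes. Iterating with list_for_each_entry (or using list_first_entry_or_null) is the correct idiom. A minimal sketch of why the check can never catch the empty case:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };
struct uld_ctx { int dummy; struct list_head entry; };

/* container_of/list_first_entry boiled down: subtract the member offset
 * from the node pointer.  Note that NULL is never produced. */
#define first_entry(head, type, member) \
        ((type *)((char *)(head)->next - offsetof(type, member)))

int main(void)
{
        struct list_head list = { &list, &list };   /* empty list */

        /* On an empty list, head->next == head, so this "entry" is just
         * the head with an offset applied: non-NULL garbage. */
        struct uld_ctx *u_ctx = first_entry(&list, struct uld_ctx, entry);

        printf("u_ctx = %p (never NULL)\n", (void *)u_ctx);
        return 0;
}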
drivers/crypto/chelsio/chcr_crypto.h

@@ -158,6 +158,9 @@ struct ablk_ctx {
 };

 struct chcr_aead_reqctx {
        struct sk_buff *skb;
+       struct scatterlist *dst;
+       struct scatterlist srcffwd[2];
+       struct scatterlist dstffwd[2];
        short int dst_nents;
        u16 verify;
        u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
drivers/crypto/qat/qat_c62x/adf_drv.c

@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                            &hw_data->accel_capabilities_mask);

        /* Find and map all the device's BARS */
-       i = 0;
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
                         ADF_PCI_MAX_BARS * 2) {
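The driver addresses BARs by fixed logical slot, so on c62x parts where the fuse bit indicates the first BAR is not exposed, the physically discovered BARs must be stored starting at slot 1; starting at 0 shifts every region down one slot and later code dereferences an unmapped slot (one of the qat NULL pointer dereferences in the pull summary). A condensed sketch of the slot-assignment fix, with hypothetical names and made-up addresses:

#include <stdio.h>

#define FUSECTL_MASK 0x80000000u

int main(void)
{
        unsigned int fuses = FUSECTL_MASK;          /* first BAR fused out */
        unsigned long discovered[2] = { 0xd0000000, 0xd0100000 };
        unsigned long slots[3] = { 0, 0, 0 };       /* logical BAR slots   */

        /* The actual fix: skip slot 0 when the fuse says it is absent. */
        int i = (fuses & FUSECTL_MASK) ? 1 : 0;

        for (int k = 0; k < 2; k++)
                slots[i++] = discovered[k];

        for (int k = 0; k < 3; k++)
                printf("slot[%d] = %#lx\n", k, slots[k]);
        return 0;
}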
drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
drivers/crypto/qat/qat_common/qat_hal.c

@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
        unsigned int csr_val;
        int times = 30;

-       if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+       if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
                return 0;

        csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
                (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
                LOCAL_TO_XFER_REG_OFFSET);
        handle->pci_dev = pci_info->pci_dev;
-       if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+       if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
                sram_bar =
                        &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
                handle->hal_sram_addr_v = sram_bar->virt_addr;
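Both hunks flip a deny-list test (`!= C3XXX`) into an allow-list test (`== DH895XCC`): eSRAM zeroing and the SRAM BAR lookup only apply to DH895xCC-class parts, and excluding just one known device meant every newly added device ID (here c62x) fell into the eSRAM path and dereferenced a SRAM BAR it does not have. A minimal sketch of the difference, with illustrative device IDs:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative device IDs standing in for the PCI IDs in the driver. */
enum { DH895XCC = 0x0435, C3XXX = 0x19e2, C62X = 0x37c8 };

/* Deny-list: silently opts in every future device ID. */
static bool needs_esram_deny_list(int dev)  { return dev != C3XXX; }

/* Allow-list, as in the fix: only the device known to have eSRAM. */
static bool needs_esram_allow_list(int dev) { return dev == DH895XCC; }

int main(void)
{
        /* A c62x part: the deny-list wrongly includes it. */
        printf("deny-list:  %d\n", needs_esram_deny_list(C62X));   /* 1: bug */
        printf("allow-list: %d\n", needs_esram_allow_list(C62X));  /* 0: ok  */
        return 0;
}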