Commit 072e8b1f authored by Jan Glauber, committed by Greg Kroah-Hartman

crypto: cavium - Fix fallout from CONFIG_VMAP_STACK

commit 37ff02ac upstream.

Enabling virtually mapped kernel stacks breaks the thunderx_zip
driver. On compression or decompression the executing CPU hangs
in an endless loop. The reason for this is the driver's use of
__pa, which no longer works for an address that is not part of
the 1:1 (linear) mapping.
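
To illustrate (a minimal sketch, not the driver's actual code): with
CONFIG_VMAP_STACK an on-stack variable lives in the vmalloc area, where
__pa()/virt_to_phys() is not valid, while kmalloc'ed memory stays in the
linear (1:1) mapping:

	/* Hedged sketch -- not the driver's actual code. */
	struct zip_state st_on_stack;			/* with CONFIG_VMAP_STACK: vmalloc space */
	phys_addr_t bad = __pa(&st_on_stack);		/* bogus: address is not in the 1:1 map  */

	struct zip_state *st = kzalloc(sizeof(*st), GFP_ATOMIC);
	if (!st)
		return -ENOMEM;
	phys_addr_t good = __pa(st);			/* valid: kmalloc memory is linearly mapped */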

The zip driver allocates a result struct on the stack and needs
to tell the hardware the physical address within this struct
that is used to signal the completion of the request.
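
Roughly, that handoff looks like the following (a hedged sketch; the
command and result field names are assumptions, not the driver's exact
definitions):

	/* Hedged sketch; field names are illustrative. */
	cmd->s.res_addr = (u64)__pa(&zip_state->result);	/* completion word the engine writes */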

As the hardware gets the wrong address after the broken __pa
conversion, it writes the completion byte to an arbitrary location.
The zip driver then waits forever for that byte to become non-zero.
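
The wait is essentially a busy poll on that completion word, along the
lines of (again a hedged sketch, not the driver's exact loop):

	/* Hedged sketch; the member name is illustrative. */
	while (READ_ONCE(zip_state->result.compcode) == 0)
		cpu_relax();	/* never terminates if the hardware wrote elsewhere */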

Allocating the result struct from 1:1 mapped memory resolves this
bug.

Signed-off-by: Jan Glauber <jglauber@cavium.com>
Reviewed-by: Robert Richter <rrichter@cavium.com>
Cc: stable <stable@vger.kernel.org> # 4.14
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4854c879
@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
 		 struct zip_kernel_ctx *zip_ctx)
 {
 	struct zip_operation  *zip_ops   = NULL;
-	struct zip_state      zip_state;
+	struct zip_state      *zip_state;
 	struct zip_device     *zip = NULL;
 	int ret;
 
@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
 	if (!zip)
 		return -ENODEV;
 
-	memset(&zip_state, 0, sizeof(struct zip_state));
+	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+	if (!zip_state)
+		return -ENOMEM;
+
 	zip_ops = &zip_ctx->zip_comp;
 
 	zip_ops->input_len  = slen;
 	zip_ops->output_len = *dlen;
 	memcpy(zip_ops->input, src, slen);
 
-	ret = zip_deflate(zip_ops, &zip_state, zip);
+	ret = zip_deflate(zip_ops, zip_state, zip);
 
 	if (!ret) {
 		*dlen = zip_ops->output_len;
 		memcpy(dst, zip_ops->output, *dlen);
 	}
-
+	kfree(zip_state);
 	return ret;
 }
 
@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
 		   struct zip_kernel_ctx *zip_ctx)
 {
 	struct zip_operation  *zip_ops   = NULL;
-	struct zip_state      zip_state;
+	struct zip_state      *zip_state;
 	struct zip_device     *zip = NULL;
 	int ret;
 
@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
 	if (!zip)
 		return -ENODEV;
 
-	memset(&zip_state, 0, sizeof(struct zip_state));
+	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+	if (!zip_state)
+		return -ENOMEM;
+
 	zip_ops = &zip_ctx->zip_decomp;
 	memcpy(zip_ops->input, src, slen);
 
@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
 	zip_ops->input_len  = slen;
 	zip_ops->output_len = *dlen;
 
-	ret = zip_inflate(zip_ops, &zip_state, zip);
+	ret = zip_inflate(zip_ops, zip_state, zip);
 
 	if (!ret) {
 		*dlen = zip_ops->output_len;
 		memcpy(dst, zip_ops->output, *dlen);
 	}
-
+	kfree(zip_state);
 	return ret;
 }