Commit c597b6bc authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "Algorithms:
   - Add RSA padding algorithm

  Drivers:
   - Add GCM mode support to atmel
   - Add atmel support for SAMA5D2 devices
   - Add cipher modes to talitos
   - Add rockchip driver for rk3288
   - Add qat support for C3XXX and C62X"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (103 commits)
  crypto: hifn_795x, picoxcell - use ablkcipher_request_cast
  crypto: qat - fix SKU definition for c3xxx dev
  crypto: qat - Fix random config build issue
  crypto: ccp - use to_pci_dev and to_platform_device
  crypto: qat - Rename dh895xcc mmp firmware
  crypto: 842 - remove WARN inside printk
  crypto: atmel-aes - add debug facilities to monitor register accesses.
  crypto: atmel-aes - add support to GCM mode
  crypto: atmel-aes - change the DMA threshold
  crypto: atmel-aes - fix the counter overflow in CTR mode
  crypto: atmel-aes - fix atmel-ctr-aes driver for RFC 3686
  crypto: atmel-aes - create sections to regroup functions by usage
  crypto: atmel-aes - fix typo and indentation
  crypto: atmel-aes - use SIZE_IN_WORDS() helper macro
  crypto: atmel-aes - improve performance of data transfer
  crypto: atmel-aes - fix atmel_aes_remove()
  crypto: atmel-aes - remove useless AES_FLAGS_DMA flag
  crypto: atmel-aes - reduce latency of DMA completion
  crypto: atmel-aes - remove unused 'err' member of struct atmel_aes_dev
  crypto: atmel-aes - rework crypto request completion
  ...
parents 60b7eca1 48d62764
Rockchip Electronics And Security Accelerator

Required properties:
- compatible: Should be "rockchip,rk3288-crypto"
- reg: Base physical address of the engine and length of the memory-mapped
       region
- interrupts: Interrupt number
- clocks: References to the clocks used by the crypto engine
- clock-names: "aclk" used to clock data
               "hclk" used to clock data
               "sclk" used to clock the crypto accelerator
               "apb_pclk" used to clock the DMA engine
- resets: Must contain an entry for each entry in reset-names.
          See ../reset/reset.txt for details.
- reset-names: Must include the name "crypto-rst".
Examples:

	crypto: crypto-controller@ff8a0000 {
		compatible = "rockchip,rk3288-crypto";
		reg = <0xff8a0000 0x4000>;
		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
		resets = <&cru SRST_CRYPTO>;
		reset-names = "crypto-rst";
		status = "okay";
	};
......@@ -164,6 +164,7 @@ struct coprocessor_request_block {
#define ICSWX_INITIATED (0x8)
#define ICSWX_BUSY (0x4)
#define ICSWX_REJECTED (0x2)
#define ICSWX_XERS0 (0x1) /* undefined or set from XER[S0]. */
static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
{
......
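
Reviewer note on the new ICSWX_XERS0 bit: it mirrors XER[S0] and can come back set alongside any real status, so every icswx() caller has to mask it before testing the result. A minimal sketch of the calling pattern (this is what the nx842 hunk further down adopts; the error mapping shown is that driver's choice, not part of the icswx() API):

	int ret = icswx(ccw, crb);

	ret &= ~ICSWX_XERS0;		/* XER[S0] copy, unrelated to the result */

	switch (ret) {
	case ICSWX_INITIATED:
		/* accepted: wait for the CSB to be written */
		break;
	case ICSWX_BUSY:
		ret = -EBUSY;		/* queue full, caller may retry */
		break;
	case ICSWX_REJECTED:
		ret = -EPROTO;		/* request refused by the coprocessor */
		break;
	}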
......@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
}
}
static int ghash_async_import(struct ahash_request *req, const void *in)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
ghash_async_init(req);
memcpy(dctx, in, sizeof(*dctx));
return 0;
}
static int ghash_async_export(struct ahash_request *req, void *out)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memcpy(out, dctx, sizeof(*dctx));
return 0;
}
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
......@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
.export = ghash_async_export,
.import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
.statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
......
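
With .export/.import wired up and .statesize set in the hunk above, a ghash-clmulni user can checkpoint an in-progress hash and resume it later, which the cryptd path relies on. A hedged sketch using the generic ahash API (the names req and req2 are illustrative):

	char state[sizeof(struct ghash_desc_ctx)];	/* crypto_ahash_statesize() bytes */
	int err;

	err = crypto_ahash_export(req, state);		/* lands in ghash_async_export() */
	if (err)
		return err;
	/* ... later, possibly on a different request ... */
	err = crypto_ahash_import(req2, state);		/* lands in ghash_async_import() */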
......@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
rsa_generic-y += rsaprivkey-asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
cryptomgr-y := algboss.o testmgr.o
......
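
rsa-pkcs1pad.o is the RSA padding template called out in the pull summary. Once rsa_init() registers it (see the rsa.c hunk below), the padded transform is reachable through the normal akcipher API; a hedged allocation sketch, assuming the template instantiates as "pkcs1pad(rsa)":

	struct crypto_akcipher *tfm;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* set the key, build an akcipher_request, then encrypt/decrypt/sign/verify */
	crypto_free_akcipher(tfm);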
......@@ -21,6 +21,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include "internal.h"
#ifdef CONFIG_NET
......@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
static void crypto_akcipher_free_instance(struct crypto_instance *inst)
{
struct akcipher_instance *akcipher = akcipher_instance(inst);
akcipher->free(akcipher);
}
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
.free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
......@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
u32 type, u32 mask)
{
spawn->base.frontend = &crypto_akcipher_type;
return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
......@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
int crypto_register_akcipher(struct akcipher_alg *alg)
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
}
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
......@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
int akcipher_register_instance(struct crypto_template *tmpl,
struct akcipher_instance *inst)
{
akcipher_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
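
The three additions above (an instance free hook, crypto_grab_akcipher(), akcipher_register_instance()) are the plumbing an akcipher template needs. A compressed sketch of the shape of such a template's create() callback, with attribute parsing, naming and most error paths elided; loosely modeled on rsa-pkcs1pad and assuming the internal/akcipher.h helpers from this series such as akcipher_instance_ctx(), so treat it as illustrative:

	static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct akcipher_instance *inst;
		struct crypto_akcipher_spawn *spawn;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		spawn = akcipher_instance_ctx(inst);	/* per-instance context */
		err = crypto_grab_akcipher(spawn, "rsa", 0, 0);
		if (err) {
			kfree(inst);
			return err;
		}

		/* fill in inst->alg ops, cra_name/cra_driver_name, sizes here */

		return akcipher_register_instance(tmpl, inst);
	}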
......@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
{
struct crypto_spawn *spawn, *n;
if (list_empty(stack))
spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
if (!spawn)
return NULL;
spawn = list_first_entry(stack, struct crypto_spawn, list);
n = list_entry(spawn->list.next, struct crypto_spawn, list);
n = list_next_entry(spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
&list_entry(n->list.next, struct crypto_spawn,
list)->inst->alg;
&list_next_entry(n, list)->inst->alg;
list_move(&spawn->list, secondary_spawns);
......
......@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
while (size) {
unsigned long len = size;
size_t len = size;
struct scatterlist *sg = NULL;
/* use the existing memory in an allocated page */
......@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* allocate a new page */
len = min_t(unsigned long, size, aead_sndbuf(sk));
while (len) {
int plen = 0;
size_t plen = 0;
if (sgl->cur >= ALG_MAX_PAGES) {
aead_put_sgl(sk);
......@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
sg = sgl->sg + sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg, alloc_page(GFP_KERNEL));
err = -ENOMEM;
......
......@@ -40,7 +40,7 @@ struct skcipher_ctx {
struct af_alg_completion completion;
atomic_t inflight;
unsigned used;
size_t used;
unsigned int len;
bool more;
......@@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
......@@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put)
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
int plen = min_t(int, used, sg[i].length);
size_t plen = min_t(size_t, used, sg[i].length);
if (!sg_page(sg + i))
continue;
......@@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
while (size) {
struct scatterlist *sg;
unsigned long len = size;
int plen;
size_t plen;
if (ctx->merge) {
sgl = list_entry(ctx->tsgl.prev,
......@@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
sg_unmark_end(sg + sgl->cur);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
err = -ENOMEM;
......
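
Common thread in the algif_aead/algif_skcipher hunks above: lengths derived from user-supplied size_t values were being clamped through int or unsigned long, which can truncate before the comparison happens. A hypothetical illustration of why the min_t() type parameter matters on a 64-bit kernel:

	size_t len = 0x100000000UL;			/* 4 GiB request */

	int bad = min_t(int, len, PAGE_SIZE);		/* (int)len == 0, so bad == 0 */
	size_t ok = min_t(size_t, len, PAGE_SIZE);	/* ok == PAGE_SIZE, as intended */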
......@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"
......
......@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
if (rctx->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
......@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
if (err)
return err;
skip:
return poly_verify_tag(req);
}
......@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
if (req->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
......@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
if (err)
return err;
skip:
return poly_genkey(req);
}
......
......@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
......@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
type = crypto_skcipher_type(type);
mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
......
......@@ -626,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
return len;
}
static struct drbg_state_ops drbg_ctr_ops = {
static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
......@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
return len;
}
static struct drbg_state_ops drbg_hmac_ops = {
static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
......@@ -1032,7 +1032,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
* scratchpad usage: as update and generate are used isolated, both
* can use the scratchpad
*/
static struct drbg_state_ops drbg_hash_ops = {
static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,
......
......@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
while (single_task_running()) {
mutex_lock(&flist->lock);
if (list_empty(&flist->list)) {
mutex_unlock(&flist->lock);
return;
}
cstate = list_entry(flist->list.next,
cstate = list_first_entry_or_null(&flist->list,
struct mcryptd_alg_cstate, flush_list);
if (!cstate->flusher_engaged) {
if (!cstate || !cstate->flusher_engaged) {
mutex_unlock(&flist->lock);
return;
}
......
......@@ -24,6 +24,12 @@
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
......
......@@ -13,6 +13,7 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
/*
* RSAEP function [RFC3447 sec 5.1.1]
......@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
goto err_free_c;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_c;
}
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
......@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
goto err_free_m;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_m;
}
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
......@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
goto err_free_s;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_s;
}
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
......@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}
if (req->dst_len < mpi_get_size(pkey->n)) {
req->dst_len = mpi_get_size(pkey->n);
ret = -EOVERFLOW;
goto err_free_m;
}
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
......@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
static int rsa_init(void)
{
return crypto_register_akcipher(&rsa);
int err;
err = crypto_register_akcipher(&rsa);
if (err)
return err;
err = crypto_register_template(&rsa_pkcs1pad_tmpl);
if (err) {
crypto_unregister_akcipher(&rsa);
return err;
}
return 0;
}
static void rsa_exit(void)
{
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}
......
......@@ -26,6 +26,13 @@
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09
};
EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
......
......@@ -27,6 +27,22 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
0x2f
};
EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};
EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
......
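
These exported digests exist because some hash engines cannot process a zero-length message; drivers must detect that case and hand back the well-known constant instead of touching the hardware. A minimal sketch of the intended use (the ccp and n2 hunks later in this merge do exactly this):

	if (msg_len == 0) {	/* engine cannot hash zero bytes */
		memcpy(digest, sha256_zero_message_hash, SHA256_DIGEST_SIZE);
		return 0;
	}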
......@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_20);
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
......
......@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out;
}
mutex_lock(&reading_mutex);
if (mutex_lock_interruptible(&reading_mutex)) {
err = -ERESTARTSYS;
goto out_put;
}
if (!data_avail) {
bytes_read = rng_get_data(rng, rng_buffer,
rng_buffer_size(),
......@@ -288,6 +291,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
out_unlock_reading:
mutex_unlock(&reading_mutex);
out_put:
put_rng(rng);
goto out;
}
......
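
The hw_random change above swaps an uninterruptible mutex_lock() for mutex_lock_interruptible(): rng_get_data() can stall for a long time on slow hardware while holding reading_mutex, so a second reader blocked on the mutex could not even be killed. The pattern in isolation, with the unwind order preserved:

	if (mutex_lock_interruptible(&reading_mutex)) {
		err = -ERESTARTSYS;	/* interrupted by a signal, e.g. ^C */
		goto out_put;		/* still drop the rng reference */
	}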
......@@ -17,7 +17,7 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/hw_random.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
......@@ -29,11 +29,11 @@
/* param1: ptr, param2: count, param3: flag */
static u32 (*omap3_rom_rng_call)(u32, u32, u32);
static struct timer_list idle_timer;
static struct delayed_work idle_work;
static int rng_idle;
static struct clk *rng_clk;
static void omap3_rom_rng_idle(unsigned long data)
static void omap3_rom_rng_idle(struct work_struct *work)
{
int r;
......@@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
u32 r;
u32 ptr;
del_timer_sync(&idle_timer);
cancel_delayed_work_sync(&idle_work);
if (rng_idle) {
clk_prepare_enable(rng_clk);
r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
......@@ -65,7 +65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
ptr = virt_to_phys(buf);
r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
if (r != 0)
return -EINVAL;
return 0;
......@@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
return -EINVAL;
}
setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
rng_clk = devm_clk_get(&pdev->dev, "ick");
if (IS_ERR(rng_clk)) {
pr_err("unable to get RNG clock\n");
......@@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
static int omap3_rom_rng_remove(struct platform_device *pdev)
{
cancel_delayed_work_sync(&idle_work);
hwrng_unregister(&omap3_rom_rng_ops);
clk_disable_unprepare(rng_clk);
return 0;
......
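
The omap3-rom-rng conversion above replaces a kernel timer with delayed work, presumably because the idle callback ends up in clk_disable_unprepare(), which may sleep and therefore must not run in the timer's softirq context; delayed work runs in process context. The one-to-one mapping used (note the callback signature also changes from void fn(unsigned long) to void fn(struct work_struct *)):

	/* setup_timer(&idle_timer, fn, 0)          ->  INIT_DELAYED_WORK(&idle_work, fn)
	 * mod_timer(&idle_timer, jiffies + delay)  ->  schedule_delayed_work(&idle_work, delay)
	 * del_timer_sync(&idle_timer)              ->  cancel_delayed_work_sync(&idle_work)
	 */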
......@@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2
select CRYPTO_DES
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
depends on SPARC64
help
Each core of a Niagara2 processor contains a Stream
......@@ -378,10 +381,10 @@ config CRYPTO_DEV_BFIN_CRC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
depends on ARCH_AT91
depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select AT_HDMAC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
......@@ -498,4 +501,15 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
select CRYPTO_AES
select CRYPTO_DES
select CRYPTO_BLKCIPHER
help
This driver interfaces with the Rockchip hardware crypto accelerator.
It supports the ecb and cbc chaining modes with the aes, des and
des3_ede ciphers.
endif # CRYPTO_HW
......@@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
......@@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* figure how many gd is needed */
num_gd = sg_nents_for_len(src, datalen);
if ((int)num_gd < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
return -EINVAL;
}
if (num_gd == 1)
num_gd = 0;
......
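
sg_nents_for_len() returns -EINVAL when the scatterlist covers fewer bytes than requested, and the unsigned num_gd above previously swallowed that negative value into its allocation math. The same defensive check recurs in the caam and marvell/cesa hunks below; in isolation:

	int nents = sg_nents_for_len(sg, len);

	if (nents < 0)		/* scatterlist shorter than len */
		return nents;	/* propagate -EINVAL to the caller */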
......@@ -9,6 +9,7 @@
#define AES_MR 0x04
#define AES_MR_CYPHER_DEC (0 << 0)
#define AES_MR_CYPHER_ENC (1 << 0)
#define AES_MR_GTAGEN (1 << 1)
#define AES_MR_DUALBUFF (1 << 3)
#define AES_MR_PROCDLY_MASK (0xF << 4)
#define AES_MR_PROCDLY_OFFSET 4
......@@ -26,6 +27,7 @@
#define AES_MR_OPMOD_OFB (0x2 << 12)
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
......@@ -44,6 +46,7 @@
#define AES_ISR 0x1C
#define AES_INT_DATARDY (1 << 0)
#define AES_INT_URAD (1 << 8)
#define AES_INT_TAGRDY (1 << 16)
#define AES_ISR_URAT_MASK (0xF << 12)
#define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12)
#define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12)
......@@ -57,6 +60,13 @@
#define AES_ODATAR(x) (0x50 + ((x) * 0x04))
#define AES_IVR(x) (0x60 + ((x) * 0x04))
#define AES_AADLENR 0x70
#define AES_CLENR 0x74
#define AES_GHASHR(x) (0x78 + ((x) * 0x04))
#define AES_TAGR(x) (0x88 + ((x) * 0x04))
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
......@@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
struct atmel_sha_dev *dd = ctx->dd;
int err = 0;
if (ctx->digcnt[0] || ctx->digcnt[1])
atmel_sha_copy_ready_hash(req);
......@@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req)
dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
ctx->digcnt[0], ctx->bufcnt);
return err;
return 0;
}
static void atmel_sha_finish_req(struct ahash_request *req, int err)
......
......@@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req)
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
......@@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
int sh_len;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
......@@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req)
int sh_len;
src_nents = sg_count(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
......@@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_bytes = (1 + src_nents) *
sizeof(struct sec4_sg_entry);
......@@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int ret = 0;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
sec4_sg_src_index = 2;
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
......@@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req)
if (to_hash) {
src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
......@@ -1572,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out)
len = state->buflen_1;
} else {
buf = state->buf_0;
len = state->buflen_1;
len = state->buflen_0;
}
memcpy(export->buf, buf, len);
......
......@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
select CRYPTO_SHA1
select CRYPTO_SHA256
help
Provides the interface to use the AMD Cryptographic Coprocessor
which can be used to offload encryption operations such as SHA,
......
......@@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
/* The CCP cannot perform zero-length sha operations so the caller
* is required to buffer data for the final operation. However, a
* sha operation for a message with a total length of zero is valid
* so known values are required to supply the result.
*/
static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};
static u32 ccp_addr_lo(struct ccp_dma_info *info)
{
return lower_32_bits(info->address + info->offset);
......@@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (sha->msg_bits)
return -EINVAL;
/* A sha operation for a message with a total length of zero,
* return known result.
/* The CCP cannot perform zero-length sha operations so the
* caller is required to buffer data for the final operation.
* However, a sha operation for a message with a total length
* of zero is valid so known values are required to supply
* the result.
*/
switch (sha->type) {
case CCP_SHA_TYPE_1:
sha_zero = ccp_sha1_zero;
sha_zero = sha1_zero_message_hash;
break;
case CCP_SHA_TYPE_224:
sha_zero = ccp_sha224_zero;
sha_zero = sha224_zero_message_hash;
break;
case CCP_SHA_TYPE_256:
sha_zero = ccp_sha256_zero;
sha_zero = sha256_zero_message_hash;
break;
default:
return -EINVAL;
......
......@@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
struct msix_entry msix_entry[MSIX_VECTORS];
unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
int v, ret;
......@@ -86,7 +86,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
ret = pci_enable_msi(pdev);
......@@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
if (ccp_pci->msix_count) {
while (ccp_pci->msix_count--)
......@@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
resource_size_t io_len;
unsigned long io_flags;
......
......@@ -35,8 +35,7 @@ struct ccp_platform {
static int ccp_get_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
int ret;
ret = platform_get_irq(pdev, 0);
......@@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
struct resource *ior;
ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......
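
For reference, the helpers swapped in across the two ccp files are the canonical container_of() wrappers from the core headers (quoted from memory, so treat as illustrative):

	/* include/linux/pci.h */
	#define to_pci_dev(n) container_of(n, struct pci_dev, dev)

	/* include/linux/platform_device.h */
	#define to_platform_device(x) container_of((x), struct platform_device, dev)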
......@@ -510,10 +510,8 @@ static int init_ixp_crypto(struct device *dev)
printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
if (buffer_pool)
dma_pool_destroy(buffer_pool);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
return ret;
}
......
......@@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
return -EINVAL;
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
}
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);
......
......@@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
creq->req.base.type = CESA_STD_REQ;
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
ret = mv_cesa_ahash_cache_req(req, cached);
if (ret)
......
......@@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
struct n2_ahash_alg {
struct list_head entry;
const char *hash_zero;
const u8 *hash_zero;
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
......@@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs);
struct n2_hash_tmpl {
const char *name;
const char *hash_zero;
const u8 *hash_zero;
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
......@@ -1276,40 +1276,19 @@ struct n2_hash_tmpl {
u8 hmac_type;
};
static const char md5_zero[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
static const u32 md5_init[MD5_HASH_WORDS] = {
cpu_to_le32(MD5_H0),
cpu_to_le32(MD5_H1),
cpu_to_le32(MD5_H2),
cpu_to_le32(MD5_H3),
};
static const char sha1_zero[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
0x07, 0x09
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const char sha256_zero[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
0x1b, 0x78, 0x52, 0xb8, 0x55
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const char sha224_zero[SHA224_DIGEST_SIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
0x2f
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
......@@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
static const struct n2_hash_tmpl hash_tmpls[] = {
{ .name = "md5",
.hash_zero = md5_zero,
.hash_zero = md5_zero_message_hash,
.hash_init = md5_init,
.auth_type = AUTH_TYPE_MD5,
.hmac_type = AUTH_TYPE_HMAC_MD5,
......@@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = MD5_DIGEST_SIZE,
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero,
.hash_zero = sha1_zero_message_hash,
.hash_init = sha1_init,
.auth_type = AUTH_TYPE_SHA1,
.hmac_type = AUTH_TYPE_HMAC_SHA1,
......@@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero,
.hash_zero = sha256_zero_message_hash,
.hash_init = sha256_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_HMAC_SHA256,
......@@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero,
.hash_zero = sha224_zero_message_hash,
.hash_init = sha224_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_RESERVED,
......@@ -2243,22 +2222,19 @@ static struct platform_driver n2_mau_driver = {
.remove = n2_mau_remove,
};
static struct platform_driver * const drivers[] = {
&n2_crypto_driver,
&n2_mau_driver,
};
static int __init n2_init(void)
{
int err = platform_driver_register(&n2_crypto_driver);
if (!err) {
err = platform_driver_register(&n2_mau_driver);
if (err)
platform_driver_unregister(&n2_crypto_driver);
}
return err;
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
static void __exit n2_exit(void)
{
platform_driver_unregister(&n2_mau_driver);
platform_driver_unregister(&n2_crypto_driver);
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(n2_init);
......
......@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
(unsigned int)ccw,
(unsigned int)be32_to_cpu(crb->ccw));
/*
 * The NX842 coprocessor sets the 3rd bit in the CR register with XER[S0].
 * XER[S0] is the integer summary-overflow bit, which has nothing to do
 * with NX.  Since this bit can be set alongside other return values,
 * mask this bit off.
 */
ret &= ~ICSWX_XERS0;
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);
......@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
pr_err_ratelimited("ICSWX rejected\n");
ret = -EPROTO;
break;
default:
pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
ret = -EPROTO;
break;
}
if (!ret)
......@@ -525,7 +529,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
static int __init nx842_powernv_probe(struct device_node *dn)
{
struct nx842_coproc *coproc;
struct property *ct_prop, *ci_prop;
unsigned int ct, ci;
int chip_id;
......@@ -534,18 +537,16 @@ static int __init nx842_powernv_probe(struct device_node *dn)
pr_err("ibm,chip-id missing\n");
return -EINVAL;
}
ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL);
if (!ct_prop) {
if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
pr_err("ibm,842-coprocessor-type missing\n");
return -EINVAL;
}
ct = be32_to_cpu(*(unsigned int *)ct_prop->value);
ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL);
if (!ci_prop) {
if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
pr_err("ibm,842-coprocessor-instance missing\n");
return -EINVAL;
}
ci = be32_to_cpu(*(unsigned int *)ci_prop->value);
coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
if (!coproc)
......
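
The probe cleanup above is a standard OF modernization: of_property_read_u32() validates presence and length and returns the CPU-endian value in one call, replacing the of_find_property() plus manual be32_to_cpu() pair. In isolation:

	u32 ct;

	if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
		pr_err("ibm,842-coprocessor-type missing\n");
		return -EINVAL;		/* property absent or malformed */
	}
	/* ct is already CPU-endian here */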
......@@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
int err = 0;
pr_debug("total: %d\n", dd->total);
omap_aes_dma_stop(dd);
......@@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
return err;
return 0;
}
static int omap_aes_check_aligned(struct scatterlist *sg, int total)
......
ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o