Commit a20acf99 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:
 "Largely this is simply adding support for the Niagara 4 cpu.

  Major areas are perf events (chip now supports 4 counters and can
  monitor any event on each counter), crypto (opcodes are available for
  sha1, sha256, sha512, md5, crc32c, AES, DES, CAMELLIA, and Kasumi
  although the last is unsupported since we lack a generic crypto layer
  Kasumi implementation), and an optimized memcpy.

  Finally some cleanups by Peter Senna Tschudin."
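
The opcode drivers below register only when the CPU advertises the crypto capability; the same capability is exported to userspace through AT_HWCAP (the HWCAP_SPARC_CRYPTO bit added further down). A minimal userspace probe could look like the following sketch, assuming glibc's getauxval(); the constant simply mirrors the new kernel definition.

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_SPARC_CRYPTO	0x04000000	/* same value as the new kernel hwcap bit */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("sparc64 crypto opcodes %sadvertised\n",
	       (hwcap & HWCAP_SPARC_CRYPTO) ? "" : "not ");
	return 0;
}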

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (47 commits)
  sparc64: Fix trailing whitespace in NG4 memcpy.
  sparc64: Fix comment type in NG4 copy from user.
  sparc64: Add SPARC-T4 optimized memcpy.
  drivers/sbus/char: removes unnecessary semicolon
  arch/sparc/kernel/pci_sun4v.c: removes unnecessary semicolon
  sparc64: Fix function argument comment in camellia_sparc64_key_expand asm.
  sparc64: Fix IV handling bug in des_sparc64_cbc_decrypt
  sparc64: Add auto-loading mechanism to crypto-opcode drivers.
  sparc64: Add missing pr_fmt define to crypto opcode drivers.
  sparc64: Adjust crypto priorities.
  sparc64: Use cpu_pgsz_mask for linear kernel mapping config.
  sparc64: Probe cpu page size support more portably.
  sparc64: Support 2GB and 16GB page sizes for kernel linear mappings.
  sparc64: Fix bugs in unrolled 256-bit loops.
  sparc64: Avoid code duplication in crypto assembler.
  sparc64: Unroll CTR crypt loops in AES driver.
  sparc64: Unroll ECB decryption loops in AES driver.
  sparc64: Unroll ECB encryption loops in AES driver.
  sparc64: Add ctr mode support to AES driver.
  sparc64: Move AES driver over to a methods based implementation.
  ...
parents 437589a7 42a4172b
@@ -6,3 +6,4 @@ obj-y += kernel/
obj-y += mm/
obj-y += math-emu/
obj-y += net/
obj-y += crypto/
#
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_SHA1_SPARC64) += sha1-sparc64.o
obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o
obj-$(CONFIG_CRYPTO_SHA512_SPARC64) += sha512-sparc64.o
obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
des-sparc64-y := des_asm.o des_glue.o crop_devid.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
/* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes.
*
* Copyright (C) 2012 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <asm/fpumacro.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include "opcodes.h"
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
#define CAMELLIA_BLOCK_SIZE 16
#define CAMELLIA_TABLE_BYTE_LEN 272
struct camellia_sparc64_ctx {
u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
int key_len;
};
extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key,
unsigned int key_len, u64 *decrypt_key);
static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
unsigned int key_len)
{
struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u32 *in_key = (const u32 *) _in_key;
u32 *flags = &tfm->crt_flags;
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->key_len = key_len;
camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0],
key_len, &ctx->decrypt_key[0]);
return 0;
}
extern void camellia_sparc64_crypt(const u64 *key, const u32 *input,
u32 *output, unsigned int key_len);
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
camellia_sparc64_crypt(&ctx->encrypt_key[0],
(const u32 *) src,
(u32 *) dst, ctx->key_len);
}
static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
camellia_sparc64_crypt(&ctx->decrypt_key[0],
(const u32 *) src,
(u32 *) dst, ctx->key_len);
}
extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len);
typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len,
const u64 *key);
extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds;
extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds;
#define CAMELLIA_BLOCK_MASK (~(CAMELLIA_BLOCK_SIZE - 1))
static int __ecb_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes, bool encrypt)
{
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
ecb_crypt_op *op;
const u64 *key;
int err;
op = camellia_sparc64_ecb_crypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_ecb_crypt_4_grand_rounds;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
if (encrypt)
key = &ctx->encrypt_key[0];
else
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
while ((nbytes = walk.nbytes)) {
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
if (likely(block_len)) {
const u64 *src64;
u64 *dst64;
src64 = (const u64 *)walk.src.virt.addr;
dst64 = (u64 *) walk.dst.virt.addr;
op(src64, dst64, block_len, key);
}
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
fprs_write(0);
return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __ecb_crypt(desc, dst, src, nbytes, true);
}
static int ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
return __ecb_crypt(desc, dst, src, nbytes, false);
}
typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len,
const u64 *key, u64 *iv);
extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds;
static int cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
cbc_crypt_op *op;
const u64 *key;
int err;
op = camellia_sparc64_cbc_encrypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_cbc_encrypt_4_grand_rounds;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
key = &ctx->encrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
while ((nbytes = walk.nbytes)) {
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
if (likely(block_len)) {
const u64 *src64;
u64 *dst64;
src64 = (const u64 *)walk.src.virt.addr;
dst64 = (u64 *) walk.dst.virt.addr;
op(src64, dst64, block_len, key,
(u64 *) walk.iv);
}
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
fprs_write(0);
return err;
}
static int cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
cbc_crypt_op *op;
const u64 *key;
int err;
op = camellia_sparc64_cbc_decrypt_3_grand_rounds;
if (ctx->key_len != 16)
op = camellia_sparc64_cbc_decrypt_4_grand_rounds;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
while ((nbytes = walk.nbytes)) {
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
if (likely(block_len)) {
const u64 *src64;
u64 *dst64;
src64 = (const u64 *)walk.src.virt.addr;
dst64 = (u64 *) walk.dst.virt.addr;
op(src64, dst64, block_len, key,
(u64 *) walk.iv);
}
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
fprs_write(0);
return err;
}
static struct crypto_alg algs[] = { {
.cra_name = "camellia",
.cra_driver_name = "camellia-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
.cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
.cia_setkey = camellia_set_key,
.cia_encrypt = camellia_encrypt,
.cia_decrypt = camellia_decrypt
}
}
}, {
.cra_name = "ecb(camellia)",
.cra_driver_name = "ecb-camellia-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.setkey = camellia_set_key,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
}, {
.cra_name = "cbc(camellia)",
.cra_driver_name = "cbc-camellia-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
.setkey = camellia_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
}
};
static bool __init sparc64_has_camellia_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_CAMELLIA))
return false;
return true;
}
static int __init camellia_sparc64_mod_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(algs); i++)
INIT_LIST_HEAD(&algs[i].cra_list);
if (sparc64_has_camellia_opcode()) {
pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
}
pr_info("sparc64 camellia opcodes not available.\n");
return -ENODEV;
}
static void __exit camellia_sparc64_mod_fini(void)
{
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}
module_init(camellia_sparc64_mod_init);
module_exit(camellia_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
MODULE_ALIAS("aes");
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include "opcodes.h"
ENTRY(crc32c_sparc64)
/* %o0=crc32p, %o1=data_ptr, %o2=len */
VISEntryHalf
lda [%o0] ASI_PL, %f1
1: ldd [%o1], %f2
CRC32C(0,2,0)
subcc %o2, 8, %o2
bne,pt %icc, 1b
add %o1, 0x8, %o1
sta %f1, [%o0] ASI_PL
VISExitHalf
2: retl
nop
ENDPROC(crc32c_sparc64)
/* Glue code for CRC32C optimized for sparc64 crypto opcodes.
*
* This is based largely upon arch/x86/crypto/crc32c-intel.c
*
* Copyright (C) 2008 Intel Corporation
* Authors: Austin Zhang <austin_zhang@linux.intel.com>
* Kent Liu <kent.liu@intel.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include "opcodes.h"
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
*/
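/* Illustrative sketch (not part of this driver): another kernel user can
 * resume a previously finalized crc32c value through the generic shash
 * API.  Because final() below inverts the accumulator, the resumed seed
 * must be re-inverted before it is set; prev_crc here is a hypothetical
 * digest returned by an earlier computation:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
 *	__le32 seed = cpu_to_le32(~prev_crc);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
 */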
static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*(__le32 *)mctx = le32_to_cpup((__le32 *)key);
return 0;
}
static int crc32c_sparc64_init(struct shash_desc *desc)
{
u32 *mctx = crypto_shash_ctx(desc->tfm);
u32 *crcp = shash_desc_ctx(desc);
*crcp = *mctx;
return 0;
}
extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len);
static void crc32c_compute(u32 *crcp, const u64 *data, unsigned int len)
{
unsigned int asm_len;
asm_len = len & ~7U;
if (asm_len) {
crc32c_sparc64(crcp, data, asm_len);
data += asm_len / 8;
len -= asm_len;
}
if (len)
*crcp = __crc32c_le(*crcp, (const unsigned char *) data, len);
}
static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
crc32c_compute(crcp, (const u64 *) data, len);
return 0;
}
static int __crc32c_sparc64_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
u32 tmp = *crcp;
crc32c_compute(&tmp, (const u64 *) data, len);
*(__le32 *) out = ~cpu_to_le32(tmp);
return 0;
}
static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
*(__le32 *) out = ~cpu_to_le32p(crcp);
return 0;
}
static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len,
out);
}
static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = ~0;
return 0;
}
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
static struct shash_alg alg = {
.setkey = crc32c_sparc64_setkey,
.init = crc32c_sparc64_init,
.update = crc32c_sparc64_update,
.final = crc32c_sparc64_final,
.finup = crc32c_sparc64_finup,
.digest = crc32c_sparc64_digest,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_init = crc32c_sparc64_cra_init,
}
};
static bool __init sparc64_has_crc32c_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_CRC32C))
return false;
return true;
}
static int __init crc32c_sparc64_mod_init(void)
{
if (sparc64_has_crc32c_opcode()) {
pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
return crypto_register_shash(&alg);
}
pr_info("sparc64 crc32c opcode not available.\n");
return -ENODEV;
}
static void __exit crc32c_sparc64_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(crc32c_sparc64_mod_init);
module_exit(crc32c_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
MODULE_ALIAS("crc32c");
#include <linux/module.h>
#include <linux/of_device.h>
/* This is a dummy device table linked into all of the crypto
* opcode drivers. It serves to trigger the module autoloading
* mechanisms in userspace which scan the OF device tree and
* load any modules which have device table entries that
* match OF device nodes.
*/
static const struct of_device_id crypto_opcode_match[] = {
{ .name = "cpu", .compatible = "sun4v", },
{},
};
MODULE_DEVICE_TABLE(of, crypto_opcode_match);
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
.align 32
ENTRY(des_sparc64_key_expand)
/* %o0=input_key, %o1=output_key */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
DES_KEXPAND(0, 0, 0)
DES_KEXPAND(0, 1, 2)
DES_KEXPAND(2, 3, 6)
DES_KEXPAND(2, 2, 4)
DES_KEXPAND(6, 3, 10)
DES_KEXPAND(6, 2, 8)
DES_KEXPAND(10, 3, 14)
DES_KEXPAND(10, 2, 12)
DES_KEXPAND(14, 1, 16)
DES_KEXPAND(16, 3, 20)
DES_KEXPAND(16, 2, 18)
DES_KEXPAND(20, 3, 24)
DES_KEXPAND(20, 2, 22)
DES_KEXPAND(24, 3, 28)
DES_KEXPAND(24, 2, 26)
DES_KEXPAND(28, 1, 30)
std %f0, [%o1 + 0x00]
std %f2, [%o1 + 0x08]
std %f4, [%o1 + 0x10]
std %f6, [%o1 + 0x18]
std %f8, [%o1 + 0x20]
std %f10, [%o1 + 0x28]
std %f12, [%o1 + 0x30]
std %f14, [%o1 + 0x38]
std %f16, [%o1 + 0x40]
std %f18, [%o1 + 0x48]
std %f20, [%o1 + 0x50]
std %f22, [%o1 + 0x58]
std %f24, [%o1 + 0x60]
std %f26, [%o1 + 0x68]
std %f28, [%o1 + 0x70]
std %f30, [%o1 + 0x78]
retl
VISExitHalf
ENDPROC(des_sparc64_key_expand)
.align 32
ENTRY(des_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des_sparc64_crypt)
.align 32
ENTRY(des_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
retl
ldd [%o0 + 0x78], %f30
ENDPROC(des_sparc64_load_keys)
.align 32
ENTRY(des_sparc64_ecb_crypt)
/* %o0=input, %o1=output, %o2=len */
1: ldd [%o0 + 0x00], %f32
add %o0, 0x08, %o0
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
nop
ENDPROC(des_sparc64_ecb_crypt)
.align 32
ENTRY(des_sparc64_cbc_encrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
ldd [%o3 + 0x00], %f32
1: ldd [%o0 + 0x00], %f34
fxor %f32, %f34, %f32
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f32, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_encrypt)
.align 32
ENTRY(des_sparc64_cbc_decrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
ldd [%o3 + 0x00], %f34
1: ldd [%o0 + 0x00], %f36
DES_IP(36, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
fxor %f32, %f34, %f32
fsrc2 %f36, %f34
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f36, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_decrypt)
.align 32
ENTRY(des3_ede_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
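	/* The expanded triple-DES key is laid out as three consecutive
	 * 0x80-byte DES schedules.  Each of the three passes below runs one
	 * full 16-round DES pass with one schedule while the loads for the
	 * next schedule are interleaved between the rounds. */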
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x80], %f0
ldd [%o0 + 0x88], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x90], %f4
ldd [%o0 + 0x98], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0xa0], %f8
ldd [%o0 + 0xa8], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0xb0], %f12
ldd [%o0 + 0xb8], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0xc0], %f16
ldd [%o0 + 0xc8], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0xd0], %f20
ldd [%o0 + 0xd8], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0xe0], %f24
ldd [%o0 + 0xe8], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0xf0], %f28
ldd [%o0 + 0xf8], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x100], %f0
ldd [%o0 + 0x108], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x110], %f4
ldd [%o0 + 0x118], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0x120], %f8
ldd [%o0 + 0x128], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0x130], %f12
ldd [%o0 + 0x138], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0x140], %f16
ldd [%o0 + 0x148], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0x150], %f20
ldd [%o0 + 0x158], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0x160], %f24
ldd [%o0 + 0x168], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0x170], %f28
ldd [%o0 + 0x178], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des3_ede_sparc64_crypt)
.align 32
ENTRY(des3_ede_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
ldd [%o0 + 0x80], %f32
ldd [%o0 + 0x88], %f34
ldd [%o0 + 0x90], %f36
ldd [%o0 + 0x98], %f38
ldd [%o0 + 0xa0], %f40
ldd [%o0 + 0xa8], %f42
ldd [%o0 + 0xb0], %f44
ldd [%o0 + 0xb8], %f46
ldd [%o0 + 0xc0], %f48
ldd [%o0 + 0xc8], %f50
ldd [%o0 + 0xd0], %f52
ldd [%o0 + 0xd8], %f54
ldd [%o0 + 0xe0], %f56
retl
ldd [%o0 + 0xe8], %f58
ENDPROC(des3_ede_sparc64_load_keys)
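/* DES3_LOOP_BODY runs all three DES passes on one 64-bit block kept in
 * float register pair X.  des3_ede_sparc64_load_keys() above pre-loads the
 * first 30 key doublewords into %f0-%f58; the remaining round keys (offset
 * 0xf0 onward) are streamed from memory in between rounds, and the first
 * schedule is re-loaded into %f0-%f30 before the macro ends, so it can be
 * applied repeatedly, one block per iteration. */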
#define DES3_LOOP_BODY(X) \
DES_IP(X, X) \
DES_ROUND(0, 2, X, X) \
DES_ROUND(4, 6, X, X) \
DES_ROUND(8, 10, X, X) \
DES_ROUND(12, 14, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0xf0], %f16; \
ldd [%o0 + 0xf8], %f18; \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x100], %f20; \
ldd [%o0 + 0x108], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x110], %f24; \
ldd [%o0 + 0x118], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x120], %f28; \
ldd [%o0 + 0x128], %f30; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(32, 34, X, X) \
ldd [%o0 + 0x130], %f0; \
ldd [%o0 + 0x138], %f2; \
DES_ROUND(36, 38, X, X) \
ldd [%o0 + 0x140], %f4; \
ldd [%o0 + 0x148], %f6; \
DES_ROUND(40, 42, X, X) \
ldd [%o0 + 0x150], %f8; \
ldd [%o0 + 0x158], %f10; \
DES_ROUND(44, 46, X, X) \
ldd [%o0 + 0x160], %f12; \
ldd [%o0 + 0x168], %f14; \
DES_ROUND(48, 50, X, X) \
DES_ROUND(52, 54, X, X) \
DES_ROUND(56, 58, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x170], %f16; \
ldd [%o0 + 0x178], %f18; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x50], %f20; \
ldd [%o0 + 0x58], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x60], %f24; \
ldd [%o0 + 0x68], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x70], %f28; \
ldd [%o0 + 0x78], %f30; \
DES_ROUND(0, 2, X, X) \
ldd [%o0 + 0x00], %f0; \
ldd [%o0 + 0x08], %f2; \
DES_ROUND(4, 6, X, X) \
ldd [%o0 + 0x10], %f4; \
ldd [%o0 + 0x18], %f6; \
DES_ROUND(8, 10, X, X) \
ldd [%o0 + 0x20], %f8; \
ldd [%o0 + 0x28], %f10; \
DES_ROUND(12, 14, X, X) \
ldd [%o0 + 0x30], %f12; \
ldd [%o0 + 0x38], %f14; \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x40], %f16; \
ldd [%o0 + 0x48], %f18; \
DES_IIP(X, X)
.align 32
ENTRY(des3_ede_sparc64_ecb_crypt)
/* %o0=key, %o1=input, %o2=output, %o3=len */
1: ldd [%o1 + 0x00], %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
nop
ENDPROC(des3_ede_sparc64_ecb_crypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_encrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f60
1: ldd [%o1 + 0x00], %f62
fxor %f60, %f62, %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
std %f60, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_encrypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_decrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f62
1: ldx [%o1 + 0x00], %g1
MOVXTOD_G1_F60
DES3_LOOP_BODY(60)
fxor %f62, %f60, %f60
MOVXTOD_G1_F62
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
stx %g1, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_decrypt)
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(md5_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
andcc %o1, 0x7, %g0
ld [%o0 + 0x08], %f2
bne,pn %xcc, 10f
ld [%o0 + 0x0c], %f3
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
MD5
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
retl
VISExitHalf
10:
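	/* Unaligned input: alignaddr computes an 8-byte aligned base and
	 * records the byte offset in %gsr, and the faligndata sequence below
	 * reassembles each misaligned doubleword from two aligned loads
	 * before the MD5 opcode consumes it. */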
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
MD5
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(md5_sparc64_transform)
/* Glue code for MD5 hashing optimized for sparc64 crypto opcodes.
*
* This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
* and crypto/md5.c which are:
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include "opcodes.h"
asmlinkage void md5_sparc64_transform(u32 *digest, const char *data,
unsigned int rounds);
static int md5_sparc64_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = cpu_to_le32(0x67452301);
mctx->hash[1] = cpu_to_le32(0xefcdab89);
mctx->hash[2] = cpu_to_le32(0x98badcfe);
mctx->hash[3] = cpu_to_le32(0x10325476);
mctx->byte_count = 0;
return 0;
}
static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data,
unsigned int len, unsigned int partial)
{
unsigned int done = 0;
sctx->byte_count += len;
if (partial) {
done = MD5_HMAC_BLOCK_SIZE - partial;
memcpy((u8 *)sctx->block + partial, data, done);
md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
}
if (len - done >= MD5_HMAC_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE;
md5_sparc64_transform(sctx->hash, data + done, rounds);
done += rounds * MD5_HMAC_BLOCK_SIZE;
}
memcpy(sctx->block, data + done, len - done);
}
static int md5_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct md5_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
/* Handle the fast case right here */
if (partial + len < MD5_HMAC_BLOCK_SIZE) {
sctx->byte_count += len;
memcpy((u8 *)sctx->block + partial, data, len);
} else
__md5_sparc64_update(sctx, data, len, partial);
return 0;
}
/* Add padding and return the message digest. */
static int md5_sparc64_final(struct shash_desc *desc, u8 *out)
{
struct md5_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
u32 *dst = (u32 *)out;
__le64 bits;
static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, };
bits = cpu_to_le64(sctx->byte_count << 3);
/* Pad out to 56 mod 64 and append length */
index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index);
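	/* For example, a 20-byte message gives index = 20 and padlen = 36:
	 * the 0x80 byte plus 35 zero bytes advance the block to offset 56,
	 * and the 8-byte little-endian bit count appended below completes
	 * the final 64-byte block. */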
/* We need to fill a whole block for __md5_sparc64_update() */
if (padlen <= 56) {
sctx->byte_count += padlen;
memcpy((u8 *)sctx->block + index, padding, padlen);
} else {
__md5_sparc64_update(sctx, padding, padlen, index);
}
__md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
/* Store state in digest */
for (i = 0; i < MD5_HASH_WORDS; i++)
dst[i] = sctx->hash[i];
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int md5_sparc64_export(struct shash_desc *desc, void *out)
{
struct md5_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int md5_sparc64_import(struct shash_desc *desc, const void *in)
{
struct md5_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_sparc64_init,
.update = md5_sparc64_update,
.final = md5_sparc64_final,
.export = md5_sparc64_export,
.import = md5_sparc64_import,
.descsize = sizeof(struct md5_state),
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static bool __init sparc64_has_md5_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_MD5))
return false;
return true;
}
static int __init md5_sparc64_mod_init(void)
{
if (sparc64_has_md5_opcode()) {
pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n");
return crypto_register_shash(&alg);
}
pr_info("sparc64 md5 opcode not available.\n");
return -ENODEV;
}
static void __exit md5_sparc64_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(md5_sparc64_mod_init);
module_exit(md5_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS("md5");
#ifndef _OPCODES_H
#define _OPCODES_H
#define SPARC_CR_OPCODE_PRIORITY 300
#define F3F(x,y,z) (((x)<<30)|((y)<<19)|((z)<<5))
#define FPD_ENCODE(x) (((x) >> 5) | ((x) & ~(0x20)))
#define RS1(x) (FPD_ENCODE(x) << 14)
#define RS2(x) (FPD_ENCODE(x) << 0)
#define RS3(x) (FPD_ENCODE(x) << 9)
#define RD(x) (FPD_ENCODE(x) << 25)
#define IMM5_0(x) ((x) << 0)
#define IMM5_9(x) ((x) << 9)
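/* Worked example: the MD5 opcode below takes no register operands, so its
 * encoding is simply F3F(2, 0x36, 0x140), i.e.
 * (2 << 30) | (0x36 << 19) | (0x140 << 5) = 0x81b02800.  FPD_ENCODE()
 * folds bit 5 of a double-precision register number into bit 0 of the
 * field, so for instance %f60 encodes as 0x1d in the rs/rd positions. */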
#define CRC32C(a,b,c) \
.word (F3F(2,0x36,0x147)|RS1(a)|RS2(b)|RD(c));
#define MD5 \
.word 0x81b02800;
#define SHA1 \
.word 0x81b02820;
#define SHA256 \
.word 0x81b02840;
#define SHA512 \
.word 0x81b02860;
#define AES_EROUND01(a,b,c,d) \
.word (F3F(2, 0x19, 0)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_EROUND23(a,b,c,d) \
.word (F3F(2, 0x19, 1)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_DROUND01(a,b,c,d) \
.word (F3F(2, 0x19, 2)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_DROUND23(a,b,c,d) \
.word (F3F(2, 0x19, 3)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_EROUND01_L(a,b,c,d) \
.word (F3F(2, 0x19, 4)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_EROUND23_L(a,b,c,d) \
.word (F3F(2, 0x19, 5)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_DROUND01_L(a,b,c,d) \
.word (F3F(2, 0x19, 6)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_DROUND23_L(a,b,c,d) \
.word (F3F(2, 0x19, 7)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define AES_KEXPAND1(a,b,c,d) \
.word (F3F(2, 0x19, 8)|RS1(a)|RS2(b)|IMM5_9(c)|RD(d));
#define AES_KEXPAND0(a,b,c) \
.word (F3F(2, 0x36, 0x130)|RS1(a)|RS2(b)|RD(c));
#define AES_KEXPAND2(a,b,c) \
.word (F3F(2, 0x36, 0x131)|RS1(a)|RS2(b)|RD(c));
#define DES_IP(a,b) \
.word (F3F(2, 0x36, 0x134)|RS1(a)|RD(b));
#define DES_IIP(a,b) \
.word (F3F(2, 0x36, 0x135)|RS1(a)|RD(b));
#define DES_KEXPAND(a,b,c) \
.word (F3F(2, 0x36, 0x136)|RS1(a)|IMM5_0(b)|RD(c));
#define DES_ROUND(a,b,c,d) \
.word (F3F(2, 0x19, 0x009)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define CAMELLIA_F(a,b,c,d) \
.word (F3F(2, 0x19, 0x00c)|RS1(a)|RS2(b)|RS3(c)|RD(d));
#define CAMELLIA_FL(a,b,c) \
.word (F3F(2, 0x36, 0x13c)|RS1(a)|RS2(b)|RD(c));
#define CAMELLIA_FLI(a,b,c) \
.word (F3F(2, 0x36, 0x13d)|RS1(a)|RS2(b)|RD(c));
#define MOVDTOX_F0_O4 \
.word 0x99b02200
#define MOVDTOX_F2_O5 \
.word 0x9bb02202
#define MOVXTOD_G1_F60 \
.word 0xbbb02301
#define MOVXTOD_G1_F62 \
.word 0xbfb02301
#define MOVXTOD_G3_F4 \
.word 0x89b02303;
#define MOVXTOD_G7_F6 \
.word 0x8db02307;
#define MOVXTOD_G3_F0 \
.word 0x81b02303;
#define MOVXTOD_G7_F2 \
.word 0x85b02307;
#define MOVXTOD_O0_F0 \
.word 0x81b02308;
#define MOVXTOD_O5_F0 \
.word 0x81b0230d;
#define MOVXTOD_O5_F2 \
.word 0x85b0230d;
#define MOVXTOD_O5_F4 \
.word 0x89b0230d;
#define MOVXTOD_O5_F6 \
.word 0x8db0230d;
#define MOVXTOD_G3_F60 \
.word 0xbbb02303;
#define MOVXTOD_G7_F62 \
.word 0xbfb02307;
#endif /* _OPCODES_H */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha1_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
ld [%o0 + 0x08], %f2
andcc %o1, 0x7, %g0
ld [%o0 + 0x0c], %f3
bne,pn %xcc, 10f
ld [%o0 + 0x10], %f4
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
SHA1
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
st %f4, [%o0 + 0x10]
retl
VISExitHalf
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
SHA1
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha1_sparc64_transform)
/* Glue code for SHA1 hashing optimized for sparc64 crypto opcodes.
*
* This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include "opcodes.h"
asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
unsigned int rounds);
static int sha1_sparc64_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
unsigned int len, unsigned int partial)
{
unsigned int done = 0;
sctx->count += len;
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->buffer + partial, data, done);
sha1_sparc64_transform(sctx->state, sctx->buffer, 1);
}
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
sha1_sparc64_transform(sctx->state, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}
memcpy(sctx->buffer, data + done, len - done);
}
static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->buffer + partial, data, len);
} else
__sha1_sparc64_update(sctx, data, len, partial);
return 0;
}
/* Add padding and return the message digest. */
static int sha1_sparc64_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 and append length */
index = sctx->count % SHA1_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
/* We need to fill a whole block for __sha1_sparc64_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->buffer + index, padding, padlen);
} else {
__sha1_sparc64_update(sctx, padding, padlen, index);
}
__sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int sha1_sparc64_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_sparc64_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_sparc64_init,
.update = sha1_sparc64_update,
.final = sha1_sparc64_final,
.export = sha1_sparc64_export,
.import = sha1_sparc64_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static bool __init sparc64_has_sha1_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_SHA1))
return false;
return true;
}
static int __init sha1_sparc64_mod_init(void)
{
if (sparc64_has_sha1_opcode()) {
pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n");
return crypto_register_shash(&alg);
}
pr_info("sparc64 sha1 opcode not available.\n");
return -ENODEV;
}
static void __exit sha1_sparc64_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_sparc64_mod_init);
module_exit(sha1_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
MODULE_ALIAS("sha1");
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha256_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
ld [%o0 + 0x08], %f2
ld [%o0 + 0x0c], %f3
ld [%o0 + 0x10], %f4
ld [%o0 + 0x14], %f5
andcc %o1, 0x7, %g0
ld [%o0 + 0x18], %f6
bne,pn %xcc, 10f
ld [%o0 + 0x1c], %f7
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
SHA256
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
st %f4, [%o0 + 0x10]
st %f5, [%o0 + 0x14]
st %f6, [%o0 + 0x18]
st %f7, [%o0 + 0x1c]
retl
VISExitHalf
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
SHA256
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha256_sparc64_transform)
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha512_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
andcc %o1, 0x7, %g0
ldd [%o0 + 0x30], %f12
bne,pn %xcc, 10f
ldd [%o0 + 0x38], %f14
1:
ldd [%o1 + 0x00], %f16
ldd [%o1 + 0x08], %f18
ldd [%o1 + 0x10], %f20
ldd [%o1 + 0x18], %f22
ldd [%o1 + 0x20], %f24
ldd [%o1 + 0x28], %f26
ldd [%o1 + 0x30], %f28
ldd [%o1 + 0x38], %f30
ldd [%o1 + 0x40], %f32
ldd [%o1 + 0x48], %f34
ldd [%o1 + 0x50], %f36
ldd [%o1 + 0x58], %f38
ldd [%o1 + 0x60], %f40
ldd [%o1 + 0x68], %f42
ldd [%o1 + 0x70], %f44
ldd [%o1 + 0x78], %f46
SHA512
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x80, %o1
5:
std %f0, [%o0 + 0x00]
std %f2, [%o0 + 0x08]
std %f4, [%o0 + 0x10]
std %f6, [%o0 + 0x18]
std %f8, [%o0 + 0x20]
std %f10, [%o0 + 0x28]
std %f12, [%o0 + 0x30]
std %f14, [%o0 + 0x38]
retl
VISExit
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f18
1:
ldd [%o1 + 0x08], %f20
ldd [%o1 + 0x10], %f22
ldd [%o1 + 0x18], %f24
ldd [%o1 + 0x20], %f26
ldd [%o1 + 0x28], %f28
ldd [%o1 + 0x30], %f30
ldd [%o1 + 0x38], %f32
ldd [%o1 + 0x40], %f34
ldd [%o1 + 0x48], %f36
ldd [%o1 + 0x50], %f38
ldd [%o1 + 0x58], %f40
ldd [%o1 + 0x60], %f42
ldd [%o1 + 0x68], %f44
ldd [%o1 + 0x70], %f46
ldd [%o1 + 0x78], %f48
ldd [%o1 + 0x80], %f50
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
faligndata %f26, %f28, %f24
faligndata %f28, %f30, %f26
faligndata %f30, %f32, %f28
faligndata %f32, %f34, %f30
faligndata %f34, %f36, %f32
faligndata %f36, %f38, %f34
faligndata %f38, %f40, %f36
faligndata %f40, %f42, %f38
faligndata %f42, %f44, %f40
faligndata %f44, %f46, %f42
faligndata %f46, %f48, %f44
faligndata %f48, %f50, %f46
SHA512
subcc %o2, 1, %o2
fsrc2 %f50, %f18
bne,pt %xcc, 1b
add %o1, 0x80, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha512_sparc64_transform)
@@ -141,7 +141,8 @@
/* SpitFire and later extended ASIs. The "(III)" marker designates
* UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
* Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
* ASIs, "(4V)" designates SUN4V specific ASIs.
* ASIs, "(4V)" designates SUN4V specific ASIs. "(NG4)" designates SPARC-T4
* and later ASIs.
*/
#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
@@ -243,6 +244,7 @@
#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/
#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */
#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */
#define ASI_PIC 0xb0 /* (NG4) PIC registers */
#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
@@ -86,6 +86,15 @@
#define AV_SPARC_IMA 0x00400000 /* integer multiply-add */
#define AV_SPARC_ASI_CACHE_SPARING \
0x00800000 /* cache sparing ASIs available */
#define AV_SPARC_PAUSE 0x01000000 /* PAUSE available */
#define AV_SPARC_CBCOND 0x02000000 /* CBCOND insns available */
/* Solaris decided to enumerate every single crypto instruction type
* in the AT_HWCAP bits. This is wasteful, since if crypto is present,
* you still need to look in the CFR register to see if the opcode is
* really available. So we simply advertise only "crypto" support.
*/
#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */
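/* Each sparc64 crypto opcode driver added in this series therefore does a
 * two-level probe: check the hwcap bit above, then read the per-opcode bit
 * from the Configuration Feature Register, roughly:
 *
 *	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
 *		return false;
 *	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
 *	return (cfr & CFR_MD5) != 0;
 *
 * with CFR_MD5 replaced by the bit for the opcode in question.
 */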
#define CORE_DUMP_USE_REGSET
@@ -2934,6 +2934,16 @@ extern unsigned long sun4v_reboot_data_set(unsigned long ra,
unsigned long len);
#endif
#define HV_FAST_VT_GET_PERFREG 0x184
#define HV_FAST_VT_SET_PERFREG 0x185
#ifndef __ASSEMBLY__
extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
unsigned long *reg_val);
extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2964,6 +2974,7 @@ extern unsigned long sun4v_reboot_data_set(unsigned long ra,
#define HV_GRP_NIU 0x0204
#define HV_GRP_VF_CPU 0x0205
#define HV_GRP_KT_CPU 0x0209
#define HV_GRP_VT_CPU 0x020c
#define HV_GRP_DIAG 0x0300
#ifndef __ASSEMBLY__
@@ -73,6 +73,7 @@ extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
extern void mdesc_populate_present_mask(cpumask_t *mask);
extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
extern void sun4v_mdesc_init(void);
@@ -2,8 +2,13 @@
#define __PCR_H
struct pcr_ops {
u64 (*read)(void);
void (*write)(u64);
u64 (*read_pcr)(unsigned long);
void (*write_pcr)(unsigned long, u64);
u64 (*read_pic)(unsigned long);
void (*write_pic)(unsigned long, u64);
u64 (*nmi_picl_value)(unsigned int nmi_hz);
u64 pcr_nmi_enable;
u64 pcr_nmi_disable;
};
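/* With SPARC-T4 exposing several counters, callers now name the counter
 * explicitly; for instance the NMI watchdog path becomes, roughly:
 *
 *	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
 *	...
 *	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 */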
extern const struct pcr_ops *pcr_ops;
@@ -27,21 +32,18 @@ extern void schedule_deferred_pcr_work(void);
#define PCR_N2_SL1_SHIFT 27
#define PCR_N2_OV1 0x80000000
extern unsigned int picl_shift;
/* In order to commonize as much of the implementation as
* possible, we use PICH as our counter. Mostly this is
* to accommodate Niagara-1 which can only count insn cycles
* in PICH.
*/
static inline u64 picl_value(unsigned int nmi_hz)
{
u32 delta = local_cpu_data().clock_tick / (nmi_hz << picl_shift);
return ((u64)((0 - delta) & 0xffffffff)) << 32;
}
extern u64 pcr_enable;
#define PCR_N4_OV 0x00000001 /* PIC overflow */
#define PCR_N4_TOE 0x00000002 /* Trap On Event */
#define PCR_N4_UTRACE 0x00000004 /* Trace user events */
#define PCR_N4_STRACE 0x00000008 /* Trace supervisor events */
#define PCR_N4_HTRACE 0x00000010 /* Trace hypervisor events */
#define PCR_N4_MASK 0x000007e0 /* Event mask */
#define PCR_N4_MASK_SHIFT 5
#define PCR_N4_SL 0x0000f800 /* Event Select */
#define PCR_N4_SL_SHIFT 11
#define PCR_N4_PICNPT 0x00010000 /* PIC non-privileged trap */
#define PCR_N4_PICNHT 0x00020000 /* PIC non-hypervisor trap */
#define PCR_N4_NTC 0x00040000 /* Next-To-Commit wrap */
extern int pcr_arch_init(void);
@@ -45,6 +45,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_NIU, },
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_KT_CPU, },
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
};