kernel_optimize_test/arch/arm64/crypto/sha256-glue.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
					unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
				      int blocks)
{
	sha256_block_data_order(sst->state, src, blocks);
}
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
				  unsigned int num_blks);

static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
				int blocks)
{
	sha256_block_neon(sst->state, src, blocks);
}
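
/*
 * Scalar shash entry points. These never touch the NEON register file and
 * are therefore usable from any context.
 */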
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
				      unsigned int len)
{
	return sha256_base_do_update(desc, data, len,
				     __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
				     unsigned int len, u8 *out)
{
	if (len)
		sha256_base_do_update(desc, data, len,
				      __sha256_block_data_order);
	sha256_base_do_finalize(desc, __sha256_block_data_order);
	return sha256_base_finish(desc, out);
}

static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
{
	return crypto_sha256_arm64_finup(desc, NULL, 0, out);
}
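
/*
 * Scalar sha256/sha224 at priority 125: above the generic C implementation
 * (priority 100), below the NEON variants registered further down.
 */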
static struct shash_alg algs[] = { {
	.digestsize		= SHA256_DIGEST_SIZE,
	.init			= sha256_base_init,
	.update			= crypto_sha256_arm64_update,
	.final			= crypto_sha256_arm64_final,
	.finup			= crypto_sha256_arm64_finup,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha256",
	.base.cra_driver_name	= "sha256-arm64",
	.base.cra_priority	= 125,
	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
}, {
	.digestsize		= SHA224_DIGEST_SIZE,
	.init			= sha224_base_init,
	.update			= crypto_sha256_arm64_update,
	.final			= crypto_sha256_arm64_final,
	.finup			= crypto_sha256_arm64_finup,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha224",
	.base.cra_driver_name	= "sha224-arm64",
	.base.cra_priority	= 125,
	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
} };
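
/*
 * NEON update path. When the NEON unit may not be used (crypto_simd_usable()
 * is false, e.g. in hard IRQ context), fall back to the scalar transform.
 */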
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable())
		return sha256_base_do_update(desc, data, len,
				__sha256_block_data_order);

	while (len > 0) {
		unsigned int chunk = len;

		/*
		 * Don't hog the CPU for the entire time it takes to process all
		 * input when running on a preemptible kernel, but process the
		 * data block by block instead.
		 */
		if (IS_ENABLED(CONFIG_PREEMPTION) &&
		    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
			chunk = SHA256_BLOCK_SIZE -
				sctx->count % SHA256_BLOCK_SIZE;

		kernel_neon_begin();
		sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
		kernel_neon_end();
		data += chunk;
		len -= chunk;
	}
	return 0;
}
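
/*
 * NEON finalization touches at most two blocks of padding, so it runs under
 * a single kernel_neon_begin()/kernel_neon_end() pair.
 */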
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	if (!crypto_simd_usable()) {
		if (len)
			sha256_base_do_update(desc, data, len,
				__sha256_block_data_order);
		sha256_base_do_finalize(desc, __sha256_block_data_order);
	} else {
		if (len)
			sha256_update_neon(desc, data, len);
		kernel_neon_begin();
		sha256_base_do_finalize(desc, __sha256_block_neon);
		kernel_neon_end();
	}
	return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
	return sha256_finup_neon(desc, NULL, 0, out);
}
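
/*
 * NEON-accelerated variants at priority 150, preferred over the scalar
 * algorithms above whenever they are registered.
 */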
static struct shash_alg neon_algs[] = { {
	.digestsize		= SHA256_DIGEST_SIZE,
	.init			= sha256_base_init,
	.update			= sha256_update_neon,
	.final			= sha256_final_neon,
	.finup			= sha256_finup_neon,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha256",
	.base.cra_driver_name	= "sha256-arm64-neon",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
}, {
	.digestsize		= SHA224_DIGEST_SIZE,
	.init			= sha224_base_init,
	.update			= sha256_update_neon,
	.final			= sha256_final_neon,
	.finup			= sha256_finup_neon,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha224",
	.base.cra_driver_name	= "sha224-arm64-neon",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
} };
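
/*
 * Register the scalar algorithms unconditionally; add the NEON ones only if
 * the CPU implements Advanced SIMD, unwinding the scalar registration when
 * the second step fails.
 */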
static int __init sha256_mod_init(void)
{
	int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));

	if (ret)
		return ret;

	if (cpu_have_named_feature(ASIMD)) {
		ret = crypto_register_shashes(neon_algs,
					      ARRAY_SIZE(neon_algs));
		if (ret)
			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
	}
	return ret;
}

static void __exit sha256_mod_fini(void)
{
	if (cpu_have_named_feature(ASIMD))
		crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);
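
/*
 * Usage sketch (illustrative, not part of this file): a kernel consumer
 * obtains whichever registered implementation has the highest priority
 * through the generic shash API, e.g.:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, data, len, out);
 *	crypto_free_shash(tfm);
 *
 * On CPUs with ASIMD this resolves to "sha256-arm64-neon" (priority 150);
 * the selection is visible in /proc/crypto.
 */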