Crypto library fixes for v6.19-rc1
Fixes for some recent regressions as well as some longstanding issues:
- Fix incorrect output from the arm64 NEON implementation of GHASH
- Merge the ksimd scopes in the arm64 XTS code to reduce stack usage
- Roll up the BLAKE2b round loop on 32-bit kernels to greatly reduce
code size and stack usage
- Add missing RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS dependency
- Fix chacha-riscv64-zvkb.S to not use frame pointer for data
-----BEGIN PGP SIGNATURE-----
iIoEABYIADIWIQSacvsUNc7UX4ntmEPzXCl4vpKOKwUCaTuQ6hQcZWJpZ2dlcnNA
a2VybmVsLm9yZwAKCRDzXCl4vpKOK9wsAQCdhMvzStncAZ6j/NU6On6naWyHu0i/
FWEstWGZrG6I7AD/ewrX4Dl9I6r/nKfpWxTna5MdssCbyDTMav1mHxOztgw=
=VAoK
-----END PGP SIGNATURE-----
Merge tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull crypto library fixes from Eric Biggers:
"Fixes for some recent regressions as well as some longstanding issues:
- Fix incorrect output from the arm64 NEON implementation of GHASH
- Merge the ksimd scopes in the arm64 XTS code to reduce stack usage
- Roll up the BLAKE2b round loop on 32-bit kernels to greatly reduce
code size and stack usage
- Add missing RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS dependency
- Fix chacha-riscv64-zvkb.S to not use frame pointer for data"
* tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux:
crypto: arm64/ghash - Fix incorrect output from ghash-neon
crypto/arm64: sm4/xts - Merge ksimd scopes to reduce stack bloat
crypto/arm64: aes/xts - Use single ksimd scope to reduce stack bloat
lib/crypto: blake2s: Replace manual unrolling with unrolled_full
lib/crypto: blake2b: Roll up BLAKE2b round loop on 32-bit
lib/crypto: riscv: Depend on RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
lib/crypto: riscv/chacha: Avoid s0/fp register
commit 187d080140 (master)
@@ -549,38 +549,37 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
                 tail = 0;
         }
 
-        for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
-                int nbytes = walk.nbytes;
+        scoped_ksimd() {
+                for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+                        int nbytes = walk.nbytes;
 
-                if (walk.nbytes < walk.total)
-                        nbytes &= ~(AES_BLOCK_SIZE - 1);
+                        if (walk.nbytes < walk.total)
+                                nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                scoped_ksimd()
                         aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                         ctx->key1.key_enc, rounds, nbytes,
                                         ctx->key2.key_enc, walk.iv, first);
-                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-        }
+                        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+                }
 
-        if (err || likely(!tail))
-                return err;
+                if (err || likely(!tail))
+                        return err;
 
-        dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-        if (req->dst != req->src)
-                dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+                dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+                if (req->dst != req->src)
+                        dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-        skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-                                   req->iv);
+                skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+                                           req->iv);
 
-        err = skcipher_walk_virt(&walk, &subreq, false);
-        if (err)
-                return err;
+                err = skcipher_walk_virt(&walk, &subreq, false);
+                if (err)
+                        return err;
 
-        scoped_ksimd()
                 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                 ctx->key1.key_enc, rounds, walk.nbytes,
                                 ctx->key2.key_enc, walk.iv, first);
-
+        }
         return skcipher_walk_done(&walk, 0);
 }
@@ -619,39 +618,37 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
                 tail = 0;
         }
 
-        for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
-                int nbytes = walk.nbytes;
+        scoped_ksimd() {
+                for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+                        int nbytes = walk.nbytes;
 
-                if (walk.nbytes < walk.total)
-                        nbytes &= ~(AES_BLOCK_SIZE - 1);
+                        if (walk.nbytes < walk.total)
+                                nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-                scoped_ksimd()
                         aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                         ctx->key1.key_dec, rounds, nbytes,
                                         ctx->key2.key_enc, walk.iv, first);
-                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-        }
+                        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+                }
 
-        if (err || likely(!tail))
-                return err;
+                if (err || likely(!tail))
+                        return err;
 
-        dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-        if (req->dst != req->src)
-                dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+                dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+                if (req->dst != req->src)
+                        dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-        skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-                                   req->iv);
+                skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+                                           req->iv);
 
-        err = skcipher_walk_virt(&walk, &subreq, false);
-        if (err)
-                return err;
+                err = skcipher_walk_virt(&walk, &subreq, false);
+                if (err)
+                        return err;
 
-
-        scoped_ksimd()
                 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                 ctx->key1.key_dec, rounds, walk.nbytes,
                                 ctx->key2.key_enc, walk.iv, first);
-
+        }
         return skcipher_walk_done(&walk, 0);
 }
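The XTS hunks above, and the aes-neonbs and sm4 ones below, all make the same transformation: instead of opening and closing a ksimd scope for every skcipher_walk step (and again for the ciphertext-stealing tail), a single scoped_ksimd() block now wraps the whole walk, which is what reduces the stack usage called out in the tag message. A rough standalone sketch of that shape follows; simd_begin()/simd_end()/encrypt_block() are hypothetical stand-ins, not kernel APIs.

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for the SIMD scope and the NEON block routine. */
static void simd_begin(void) { /* e.g. enter kernel SIMD context */ }
static void simd_end(void)   { /* e.g. leave kernel SIMD context */ }
static void encrypt_block(unsigned char *dst, const unsigned char *src)
{
        memcpy(dst, src, 16);   /* placeholder for the real cipher */
}

/* Before: a fresh SIMD scope per loop iteration. */
static void crypt_scope_per_block(unsigned char *dst, const unsigned char *src,
                                  size_t nblocks)
{
        for (size_t i = 0; i < nblocks; i++) {
                simd_begin();
                encrypt_block(dst + 16 * i, src + 16 * i);
                simd_end();
        }
}

/* After: one scope hoisted around the whole loop, as in the hunks above. */
static void crypt_single_scope(unsigned char *dst, const unsigned char *src,
                               size_t nblocks)
{
        simd_begin();
        for (size_t i = 0; i < nblocks; i++)
                encrypt_block(dst + 16 * i, src + 16 * i);
        simd_end();
}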
@@ -312,13 +312,13 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
         if (err)
                 return err;
 
-        while (walk.nbytes >= AES_BLOCK_SIZE) {
-                int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
-                out = walk.dst.virt.addr;
-                in = walk.src.virt.addr;
-                nbytes = walk.nbytes;
+        scoped_ksimd() {
+                while (walk.nbytes >= AES_BLOCK_SIZE) {
+                        int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
+                        out = walk.dst.virt.addr;
+                        in = walk.src.virt.addr;
+                        nbytes = walk.nbytes;
 
-                scoped_ksimd() {
                         if (blocks >= 8) {
                                 if (first == 1)
                                         neon_aes_ecb_encrypt(walk.iv, walk.iv,
@@ -344,30 +344,28 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
                                                      ctx->twkey, walk.iv, first);
                                 nbytes = first = 0;
                         }
-                }
-                err = skcipher_walk_done(&walk, nbytes);
-        }
+                        err = skcipher_walk_done(&walk, nbytes);
+                }
 
-        if (err || likely(!tail))
-                return err;
+                if (err || likely(!tail))
+                        return err;
 
-        /* handle ciphertext stealing */
-        dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-        if (req->dst != req->src)
-                dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+                /* handle ciphertext stealing */
+                dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+                if (req->dst != req->src)
+                        dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-        skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-                                   req->iv);
+                skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+                                           req->iv);
 
-        err = skcipher_walk_virt(&walk, req, false);
-        if (err)
-                return err;
+                err = skcipher_walk_virt(&walk, req, false);
+                if (err)
+                        return err;
 
-        out = walk.dst.virt.addr;
-        in = walk.src.virt.addr;
-        nbytes = walk.nbytes;
+                out = walk.dst.virt.addr;
+                in = walk.src.virt.addr;
+                nbytes = walk.nbytes;
 
-        scoped_ksimd() {
                 if (encrypt)
                         neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
                                              ctx->key.rounds, nbytes, ctx->twkey,
@@ -133,7 +133,7 @@ static int ghash_finup(struct shash_desc *desc, const u8 *src,
                 u8 buf[GHASH_BLOCK_SIZE] = {};
 
                 memcpy(buf, src, len);
-                ghash_do_simd_update(1, ctx->digest, src, key, NULL,
+                ghash_do_simd_update(1, ctx->digest, buf, key, NULL,
                                      pmull_ghash_update_p8);
                 memzero_explicit(buf, sizeof(buf));
         }
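The ghash-neon hunk above is the actual correctness fix in this batch: the final partial block is already copied into a zero-padded stack buffer (buf), but the multiply helper was still handed the original src pointer, so the last block was processed unpadded and the digest came out wrong whenever the input length was not a multiple of GHASH_BLOCK_SIZE. The padding pattern, reduced to a standalone sketch with a toy process_block() standing in for the kernel's ghash_do_simd_update():

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Toy stand-in for the real block function (e.g. a GHASH multiply). */
static void process_block(unsigned char state[BLOCK_SIZE],
                          const unsigned char block[BLOCK_SIZE])
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                state[i] ^= block[i];
}

static void finish_partial(unsigned char state[BLOCK_SIZE],
                           const unsigned char *src, size_t len)
{
        unsigned char buf[BLOCK_SIZE] = {};     /* implicit zero padding */

        memcpy(buf, src, len);          /* len < BLOCK_SIZE */
        process_block(state, buf);      /* must use buf, not src */
        memset(buf, 0, sizeof(buf));    /* the kernel uses memzero_explicit() */
}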
@@ -346,11 +346,11 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
                 tail = 0;
         }
 
-        while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
-                if (nbytes < walk.total)
-                        nbytes &= ~(SM4_BLOCK_SIZE - 1);
+        scoped_ksimd() {
+                while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
+                        if (nbytes < walk.total)
+                                nbytes &= ~(SM4_BLOCK_SIZE - 1);
 
-                scoped_ksimd() {
                         if (encrypt)
                                 sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
                                                walk.src.virt.addr, walk.iv, nbytes,
@@ -359,32 +359,30 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
                                 sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
                                                walk.src.virt.addr, walk.iv, nbytes,
                                                rkey2_enc);
-                }
 
-                rkey2_enc = NULL;
+                        rkey2_enc = NULL;
 
-                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-                if (err)
-                        return err;
-        }
+                        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+                        if (err)
+                                return err;
+                }
 
-        if (likely(tail == 0))
-                return 0;
+                if (likely(tail == 0))
+                        return 0;
 
-        /* handle ciphertext stealing */
+                /* handle ciphertext stealing */
 
-        dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
-        if (req->dst != req->src)
-                dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
+                dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+                if (req->dst != req->src)
+                        dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
 
-        skcipher_request_set_crypt(&subreq, src, dst,
-                                   SM4_BLOCK_SIZE + tail, req->iv);
+                skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
+                                           req->iv);
 
-        err = skcipher_walk_virt(&walk, &subreq, false);
-        if (err)
-                return err;
+                err = skcipher_walk_virt(&walk, &subreq, false);
+                if (err)
+                        return err;
 
-        scoped_ksimd() {
                 if (encrypt)
                         sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
                                        walk.src.virt.addr, walk.iv, walk.nbytes,
@@ -4,7 +4,8 @@ menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
 
 config CRYPTO_AES_RISCV64
         tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
-        depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         select CRYPTO_ALGAPI
         select CRYPTO_LIB_AES
         select CRYPTO_SKCIPHER
@@ -20,7 +21,8 @@ config CRYPTO_AES_RISCV64
 
 config CRYPTO_GHASH_RISCV64
         tristate "Hash functions: GHASH"
-        depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         select CRYPTO_GCM
         help
           GCM GHASH function (NIST SP 800-38D)
@@ -30,7 +32,8 @@ config CRYPTO_GHASH_RISCV64
 
 config CRYPTO_SM3_RISCV64
         tristate "Hash functions: SM3 (ShangMi 3)"
-        depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         select CRYPTO_HASH
         select CRYPTO_LIB_SM3
         help
@@ -42,7 +45,8 @@ config CRYPTO_SM3_RISCV64
 
 config CRYPTO_SM4_RISCV64
         tristate "Ciphers: SM4 (ShangMi 4)"
-        depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         select CRYPTO_ALGAPI
         select CRYPTO_SM4
         help
@@ -61,7 +61,8 @@ config CRYPTO_LIB_CHACHA_ARCH
         default y if ARM64 && KERNEL_MODE_NEON
         default y if MIPS && CPU_MIPS32_R2
         default y if PPC64 && CPU_LITTLE_ENDIAN && VSX
-        default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         default y if S390
         default y if X86_64
 
@@ -184,7 +185,8 @@ config CRYPTO_LIB_SHA256_ARCH
         default y if ARM64
         default y if MIPS && CPU_CAVIUM_OCTEON
         default y if PPC && SPE
-        default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         default y if S390
         default y if SPARC64
         default y if X86_64
@@ -202,7 +204,8 @@ config CRYPTO_LIB_SHA512_ARCH
         default y if ARM && !CPU_V7M
         default y if ARM64
         default y if MIPS && CPU_CAVIUM_OCTEON
-        default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+        default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+                     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
         default y if S390
         default y if SPARC64
         default y if X86_64
@@ -33,7 +33,6 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
 
 obj-$(CONFIG_CRYPTO_LIB_BLAKE2B) += libblake2b.o
 libblake2b-y := blake2b.o
-CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
 ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y)
 CFLAGS_blake2b.o += -I$(src)/$(SRCARCH)
 libblake2b-$(CONFIG_ARM) += arm/blake2b-neon-core.o
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unroll.h>
 #include <linux/types.h>
 
 static const u8 blake2b_sigma[12][16] = {
@@ -73,31 +74,26 @@ blake2b_compress_generic(struct blake2b_ctx *ctx,
                 b = ror64(b ^ c, 63); \
         } while (0)
 
-#define ROUND(r) do { \
-        G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
-        G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
-        G(r, 2, v[2], v[ 6], v[10], v[14]); \
-        G(r, 3, v[3], v[ 7], v[11], v[15]); \
-        G(r, 4, v[0], v[ 5], v[10], v[15]); \
-        G(r, 5, v[1], v[ 6], v[11], v[12]); \
-        G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
-        G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
-        ROUND(0);
-        ROUND(1);
-        ROUND(2);
-        ROUND(3);
-        ROUND(4);
-        ROUND(5);
-        ROUND(6);
-        ROUND(7);
-        ROUND(8);
-        ROUND(9);
-        ROUND(10);
-        ROUND(11);
-
+#ifdef CONFIG_64BIT
+        /*
+         * Unroll the rounds loop to enable constant-folding of the
+         * blake2b_sigma values. Seems worthwhile on 64-bit kernels.
+         * Not worthwhile on 32-bit kernels because the code size is
+         * already so large there due to BLAKE2b using 64-bit words.
+         */
+        unrolled_full
+#endif
+        for (int r = 0; r < 12; r++) {
+                G(r, 0, v[0], v[4], v[8], v[12]);
+                G(r, 1, v[1], v[5], v[9], v[13]);
+                G(r, 2, v[2], v[6], v[10], v[14]);
+                G(r, 3, v[3], v[7], v[11], v[15]);
+                G(r, 4, v[0], v[5], v[10], v[15]);
+                G(r, 5, v[1], v[6], v[11], v[12]);
+                G(r, 6, v[2], v[7], v[8], v[13]);
+                G(r, 7, v[3], v[4], v[9], v[14]);
+        }
 #undef G
-#undef ROUND
 
         for (i = 0; i < 8; ++i)
                 ctx->h[i] ^= v[i] ^ v[i + 8];
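The BLAKE2b hunks above (and the matching blake2s ones below) replace the manually expanded ROUND(0) through ROUND(11) sequence with a loop over the round index, optionally annotated with unrolled_full from <linux/unroll.h>. The rewrite is behaviour-preserving because each expansion differed only in the round number fed to the sigma schedule. A toy standalone check of that equivalence; the round function here is made up and is not BLAKE2:

#include <assert.h>
#include <stdint.h>

#define ROUNDS 12

static const uint8_t sched[ROUNDS] = { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8 };

/* Made-up round: the only per-round input is the schedule entry. */
static void toy_round(uint32_t v[4], int r)
{
        v[r & 3] += sched[r];
        v[(r + 1) & 3] ^= v[r & 3] >> 1;
}

int main(void)
{
        uint32_t a[4] = { 1, 2, 3, 4 }, b[4] = { 1, 2, 3, 4 };

        /* Old shape: every round spelled out by hand. */
        toy_round(a, 0);  toy_round(a, 1);  toy_round(a, 2);  toy_round(a, 3);
        toy_round(a, 4);  toy_round(a, 5);  toy_round(a, 6);  toy_round(a, 7);
        toy_round(a, 8);  toy_round(a, 9);  toy_round(a, 10); toy_round(a, 11);

        /* New shape: rolled loop; the compiler may still unroll it fully. */
        for (int r = 0; r < ROUNDS; r++)
                toy_round(b, r);

        for (int i = 0; i < 4; i++)
                assert(a[i] == b[i]);
        return 0;
}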
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unroll.h>
 #include <linux/types.h>
 
 static const u8 blake2s_sigma[10][16] = {
@@ -71,29 +72,22 @@ blake2s_compress_generic(struct blake2s_ctx *ctx,
                 b = ror32(b ^ c, 7); \
         } while (0)
 
-#define ROUND(r) do { \
-        G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
-        G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
-        G(r, 2, v[2], v[ 6], v[10], v[14]); \
-        G(r, 3, v[3], v[ 7], v[11], v[15]); \
-        G(r, 4, v[0], v[ 5], v[10], v[15]); \
-        G(r, 5, v[1], v[ 6], v[11], v[12]); \
-        G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
-        G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
-        ROUND(0);
-        ROUND(1);
-        ROUND(2);
-        ROUND(3);
-        ROUND(4);
-        ROUND(5);
-        ROUND(6);
-        ROUND(7);
-        ROUND(8);
-        ROUND(9);
-
+        /*
+         * Unroll the rounds loop to enable constant-folding of the
+         * blake2s_sigma values.
+         */
+        unrolled_full
+        for (int r = 0; r < 10; r++) {
+                G(r, 0, v[0], v[4], v[8], v[12]);
+                G(r, 1, v[1], v[5], v[9], v[13]);
+                G(r, 2, v[2], v[6], v[10], v[14]);
+                G(r, 3, v[3], v[7], v[11], v[15]);
+                G(r, 4, v[0], v[5], v[10], v[15]);
+                G(r, 5, v[1], v[6], v[11], v[12]);
+                G(r, 6, v[2], v[7], v[8], v[13]);
+                G(r, 7, v[3], v[4], v[9], v[14]);
+        }
 #undef G
-#undef ROUND
 
         for (i = 0; i < 8; ++i)
                 ctx->h[i] ^= v[i] ^ v[i + 8];
@@ -60,7 +60,8 @@
 #define VL              t2
 #define STRIDE          t3
 #define ROUND_CTR       t4
-#define KEY0            s0
+#define KEY0            t5
+// Avoid s0/fp to allow for unwinding
 #define KEY1            s1
 #define KEY2            s2
 #define KEY3            s3
@@ -143,7 +144,6 @@
 // The updated 32-bit counter is written back to state->x[12] before returning.
 SYM_FUNC_START(chacha_zvkb)
         addi            sp, sp, -96
-        sd              s0, 0(sp)
         sd              s1, 8(sp)
         sd              s2, 16(sp)
         sd              s3, 24(sp)
@@ -280,7 +280,6 @@ SYM_FUNC_START(chacha_zvkb)
         bnez            NBLOCKS, .Lblock_loop
 
         sw              COUNTER, 48(STATEP)
-        ld              s0, 0(sp)
         ld              s1, 8(sp)
         ld              s2, 16(sp)
         ld              s3, 24(sp)