diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8880c1fc51d8..455a3354e291 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -306,6 +306,11 @@ config CRYPTO_AEGIS128
 	help
 	  Support for the AEGIS-128 dedicated AEAD algorithm.
 
+config CRYPTO_AEGIS128_SIMD
+	bool "Support SIMD acceleration for AEGIS-128"
+	depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
+	default y
+
 config CRYPTO_AEGIS128_AESNI_SSE2
 	tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
 	depends on X86 && 64BIT
diff --git a/crypto/Makefile b/crypto/Makefile
index 92e985714ff6..99a9fa9087d1 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -91,6 +91,18 @@ obj-$(CONFIG_CRYPTO_CCM) += ccm.o
 obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
 obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
 aegis128-y := aegis128-core.o
+
+ifeq ($(ARCH),arm)
+CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
+CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
+aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
+endif
+ifeq ($(ARCH),arm64)
+CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto
+CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only
+aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
+endif
+
 obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c
new file mode 100644
index 000000000000..3d8043c4832b
--- /dev/null
+++ b/crypto/aegis128-neon-inner.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019 Linaro, Ltd.
+ */
+
+#ifdef CONFIG_ARM64
+#include <asm/neon-intrinsics.h>
+
+#define AES_ROUND	"aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b"
+#else
+#include <arm_neon.h>
+
+#define AES_ROUND	"aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0"
+#endif
+
+#define AEGIS_BLOCK_SIZE	16
+
+#include <stddef.h>
+
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+
+struct aegis128_state {
+	uint8x16_t v[5];
+};
+
+static struct aegis128_state aegis128_load_state_neon(const void *state)
+{
+	return (struct aegis128_state){ {
+		vld1q_u8(state),
+		vld1q_u8(state + 16),
+		vld1q_u8(state + 32),
+		vld1q_u8(state + 48),
+		vld1q_u8(state + 64)
+	} };
+}
+
+static void aegis128_save_state_neon(struct aegis128_state st, void *state)
+{
+	vst1q_u8(state, st.v[0]);
+	vst1q_u8(state + 16, st.v[1]);
+	vst1q_u8(state + 32, st.v[2]);
+	vst1q_u8(state + 48, st.v[3]);
+	vst1q_u8(state + 64, st.v[4]);
+}
+
+static inline __attribute__((always_inline))
+uint8x16_t aegis_aes_round(uint8x16_t w)
+{
+	uint8x16_t z = {};
+
+	/*
+	 * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics
+	 * to force the compiler to issue the aese/aesmc instructions in pairs.
+	 * This is much faster on many cores, where the instruction pair can
+	 * execute in a single cycle.
+	 */
+	asm(AES_ROUND : "+w"(w) : "w"(z));
+	return w;
+}
+
+static inline __attribute__((always_inline))
+struct aegis128_state aegis128_update_neon(struct aegis128_state st,
+					   uint8x16_t m)
+{
+	m ^= aegis_aes_round(st.v[4]);
+	st.v[4] ^= aegis_aes_round(st.v[3]);
+	st.v[3] ^= aegis_aes_round(st.v[2]);
+	st.v[2] ^= aegis_aes_round(st.v[1]);
+	st.v[1] ^= aegis_aes_round(st.v[0]);
+	st.v[0] ^= m;
+
+	return st;
+}
+
+void crypto_aegis128_update_neon(void *state, const void *msg)
+{
+	struct aegis128_state st = aegis128_load_state_neon(state);
+
+	st = aegis128_update_neon(st, vld1q_u8(msg));
+
+	aegis128_save_state_neon(st, state);
+}
+
+void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
+					unsigned int size)
+{
+	struct aegis128_state st = aegis128_load_state_neon(state);
+	uint8x16_t msg;
+
+	while (size >= AEGIS_BLOCK_SIZE) {
+		uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
+
+		msg = vld1q_u8(src);
+		st = aegis128_update_neon(st, msg);
+		vst1q_u8(dst, msg ^ s);
+
+		size -= AEGIS_BLOCK_SIZE;
+		src += AEGIS_BLOCK_SIZE;
+		dst += AEGIS_BLOCK_SIZE;
+	}
+
+	if (size > 0) {
+		uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
+		uint8_t buf[AEGIS_BLOCK_SIZE] = {};
+
+		memcpy(buf, src, size);
+		msg = vld1q_u8(buf);
+		st = aegis128_update_neon(st, msg);
+		vst1q_u8(buf, msg ^ s);
+		memcpy(dst, buf, size);
+	}
+
+	aegis128_save_state_neon(st, state);
+}
+
+void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
+					unsigned int size)
+{
+	struct aegis128_state st = aegis128_load_state_neon(state);
+	uint8x16_t msg;
+
+	while (size >= AEGIS_BLOCK_SIZE) {
+		msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
+		st = aegis128_update_neon(st, msg);
+		vst1q_u8(dst, msg);
+
+		size -= AEGIS_BLOCK_SIZE;
+		src += AEGIS_BLOCK_SIZE;
+		dst += AEGIS_BLOCK_SIZE;
+	}
+
+	if (size > 0) {
+		uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
+		uint8_t buf[AEGIS_BLOCK_SIZE];
+
+		vst1q_u8(buf, s);
+		memcpy(buf, src, size);
+		msg = vld1q_u8(buf) ^ s;
+		vst1q_u8(buf, msg);
+		memcpy(dst, buf, size);
+
+		st = aegis128_update_neon(st, msg);
+	}
+
+	aegis128_save_state_neon(st, state);
+}
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
new file mode 100644
index 000000000000..c1c0a1686f67
--- /dev/null
+++ b/crypto/aegis128-neon.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019 Linaro Ltd
+ */
+
+#include <asm/cpufeature.h>
+#include <asm/neon.h>
+
+#include "aegis.h"
+
+void crypto_aegis128_update_neon(void *state, const void *msg);
+void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
+					unsigned int size);
+void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
+					unsigned int size);
+
+bool crypto_aegis128_have_simd(void)
+{
+	return cpu_have_feature(cpu_feature(AES));
+}
+
+void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
+{
+	kernel_neon_begin();
+	crypto_aegis128_update_neon(state, msg);
+	kernel_neon_end();
+}
+
+void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
+					const u8 *src, unsigned int size)
+{
+	kernel_neon_begin();
+	crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
+	kernel_neon_end();
+}
+
+void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
+					const u8 *src, unsigned int size)
+{
+	kernel_neon_begin();
+	crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
+	kernel_neon_end();
+}
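Note (not part of the patch above): the diff only adds the NEON files and the
build glue; the generic crypto/aegis128-core.c is expected to probe
crypto_aegis128_have_simd() once at init and route block processing through the
*_simd() hooks, which bracket the NEON code in kernel_neon_begin()/
kernel_neon_end() themselves. Below is a minimal sketch of that core-side
dispatch, under stated assumptions: the `have_simd` flag, the
crypto_aegis128_update_a() wrapper, the crypto_aegis128_update() scalar
fallback, and the crypto_aegis128_alg descriptor are illustrative names, since
the core-side changes are not in this excerpt.

	static bool have_simd;	/* probed once at module init */

	static void crypto_aegis128_update_a(union aegis_block *state,
					     const union aegis_block *msg)
	{
		if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && have_simd) {
			/* NEON path; the hook handles kernel_neon_begin/end */
			crypto_aegis128_update_simd(state, msg);
			return;
		}
		/* portable scalar path, always available */
		crypto_aegis128_update(state, msg);
	}

	static int __init crypto_aegis128_module_init(void)
	{
		if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD))
			have_simd = crypto_aegis128_have_simd();
		return crypto_register_aead(&crypto_aegis128_alg);
	}

Probing the CPU feature once at init rather than per call keeps the hot path to
a single flag test, and the IS_ENABLED() check lets the compiler drop the SIMD
branch entirely when CONFIG_CRYPTO_AEGIS128_SIMD is not set, so the hooks need
not exist in that build.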