move x86 inline asm to intrinsics, add e2k AES-NI/AVX support

Signed-off-by: contextswap <ctxswp@proton.me>
contextswap 2023-05-04 05:08:49 +09:00
parent a9e9e14c42
commit 1509349fec
GPG key ID: 06A0B660A90A787B
4 changed files with 221 additions and 262 deletions
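The conversion follows one pattern throughout: every GCC inline-asm block that drove the AES-NI/AVX instructions is rewritten with the equivalent <immintrin.h> intrinsics, so the same source can build on x86 with GCC/Clang and on Elbrus (e2k), whose native compiler cannot consume x86 inline assembly but is expected to accept these intrinsics when __AES__/__AVX__ are defined. A minimal illustrative sketch of the mapping, not taken from the diff (names are hypothetical, rk is assumed to point at a 16-byte-aligned pair of round keys, and the unit is built with AES support enabled, e.g. -maes on GCC/Clang):

#include <immintrin.h>
#include <stdint.h>

// One AES round step, intrinsics style; the comments show the inline-asm line each call replaces.
static inline void RoundSketch (uint8_t * state, const uint8_t * rk)
{
    __m128i s = _mm_loadu_si128 ((const __m128i *)state);                  // "movups (%[in]), %%xmm0"
    s = _mm_xor_si128 (s, _mm_load_si128 ((const __m128i *)rk));           // "pxor (%[sched]), %%xmm0"
    s = _mm_aesenc_si128 (s, _mm_load_si128 ((const __m128i *)(rk + 16))); // "aesenc 16(%[sched]), %%xmm0"
    _mm_storeu_si128 ((__m128i *)state, s);                                // "movups %%xmm0, (%[out])"
}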


@@ -51,6 +51,10 @@ ifneq (, $(DESTDIR))
 	PREFIX = $(DESTDIR)
 endif
 
+ifneq (, $(findstring e2k, $(SYS)))
+	CXX_DEBUG += -Wno-deprecated-declarations
+endif
+
 ifneq (, $(findstring darwin, $(SYS)))
 	DAEMON_SRC += $(DAEMON_SRC_DIR)/UnixDaemon.cpp
 	ifeq ($(HOMEBREW),1)


@@ -50,6 +50,14 @@ namespace cpu
             }
         }
 #endif // defined(__x86_64__) || defined(__i386__)
+#ifdef __e2k__
+#ifdef __AES__
+        aesni = true;
+#endif
+#ifdef __AVX__
+        avx = true;
+#endif
+#endif
 
         LogPrint(eLogInfo, "AESNI ", (aesni ? "enabled" : "disabled"));
         LogPrint(eLogInfo, "AVX ", (avx ? "enabled" : "disabled"));


@@ -6,6 +6,8 @@
  * See full license text in LICENSE file at top of project tree
  */
 
+#include <stdio.h>
 #include <string.h>
 #include <string>
 #include <vector>
@@ -16,6 +18,9 @@
 #include <openssl/crypto.h>
 #include "TunnelBase.h"
 #include <openssl/ssl.h>
+#ifdef __AES__
+#include <immintrin.h>
+#endif
 #if OPENSSL_HKDF
 #include <openssl/kdf.h>
 #endif
@@ -555,103 +560,96 @@ namespace crypto
     }
 // AES
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
-#define KeyExpansion256(round0,round1) \
-    "pshufd $0xff, %%xmm2, %%xmm2 \n" \
-    "movaps %%xmm1, %%xmm4 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm1 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm1 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm1 \n" \
-    "pxor %%xmm2, %%xmm1 \n" \
-    "movaps %%xmm1, "#round0"(%[sched]) \n" \
-    "aeskeygenassist $0, %%xmm1, %%xmm4 \n" \
-    "pshufd $0xaa, %%xmm4, %%xmm2 \n" \
-    "movaps %%xmm3, %%xmm4 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm3 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm3 \n" \
-    "pslldq $4, %%xmm4 \n" \
-    "pxor %%xmm4, %%xmm3 \n" \
-    "pxor %%xmm2, %%xmm3 \n" \
-    "movaps %%xmm3, "#round1"(%[sched]) \n"
+#if defined(__AES__)
+#define KeyExpansion256(round0, round1) \
+    xmm_2 = _mm_shuffle_epi32(xmm_2, 0xff); \
+    xmm_4 = (__m128i)_mm_load_ps((float const*)&xmm_1); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4); \
+    xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_2); \
+    _mm_store_ps((float*)(sched + round0), xmm_1); \
+    xmm_4 = _mm_aeskeygenassist_si128((__m128i)xmm_1, 0); \
+    xmm_2 = _mm_shuffle_epi32(xmm_4, 0xaa); \
+    xmm_4 = (__m128i)_mm_load_ps((float const*)&xmm_3); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_3 = (__m128)_mm_xor_si128((__m128i)xmm_3, xmm_4); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_3 = (__m128)_mm_xor_si128((__m128i)xmm_3, xmm_4); \
+    xmm_4 = _mm_slli_si128(xmm_4, 4); \
+    xmm_3 = (__m128)_mm_xor_si128((__m128i)xmm_3, xmm_4); \
+    xmm_3 = (__m128)_mm_xor_si128((__m128i)xmm_3, xmm_2); \
+    _mm_store_ps((float*)(sched + round1), xmm_3);
 #endif
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
     void ECBCryptoAESNI::ExpandKey (const AESKey& key)
     {
-        __asm__
-        (
-            "movups (%[key]), %%xmm1 \n"
-            "movups 16(%[key]), %%xmm3 \n"
-            "movaps %%xmm1, (%[sched]) \n"
-            "movaps %%xmm3, 16(%[sched]) \n"
-            "aeskeygenassist $1, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(32,48)
-            "aeskeygenassist $2, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(64,80)
-            "aeskeygenassist $4, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(96,112)
-            "aeskeygenassist $8, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(128,144)
-            "aeskeygenassist $16, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(160,176)
-            "aeskeygenassist $32, %%xmm3, %%xmm2 \n"
-            KeyExpansion256(192,208)
-            "aeskeygenassist $64, %%xmm3, %%xmm2 \n"
-            // key expansion final
-            "pshufd $0xff, %%xmm2, %%xmm2 \n"
-            "movaps %%xmm1, %%xmm4 \n"
-            "pslldq $4, %%xmm4 \n"
-            "pxor %%xmm4, %%xmm1 \n"
-            "pslldq $4, %%xmm4 \n"
-            "pxor %%xmm4, %%xmm1 \n"
-            "pslldq $4, %%xmm4 \n"
-            "pxor %%xmm4, %%xmm1 \n"
-            "pxor %%xmm2, %%xmm1 \n"
-            "movups %%xmm1, 224(%[sched]) \n"
-            : // output
-            : [key]"r"((const uint8_t *)key), [sched]"r"(GetKeySchedule ()) // input
-            : "%xmm1", "%xmm2", "%xmm3", "%xmm4", "memory" // clobbered
-        );
+        uint8_t* sched = GetKeySchedule();
+        __m128 xmm_1 = _mm_loadu_ps((float const*)&key);
+        __m128 xmm_3 = _mm_loadu_ps((float const*)((uint8_t*)&key + 0x10));
+        _mm_store_ps((float*)(sched), xmm_1);
+        _mm_store_ps((float*)(sched + 0x10), xmm_3);
+        __m128i xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 1);
+        __m128i xmm_4;
+        KeyExpansion256(32, 48)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 2);
+        KeyExpansion256(64, 80)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 4);
+        KeyExpansion256(96, 112)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 8);
+        KeyExpansion256(128, 144)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 16);
+        KeyExpansion256(160, 176)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 32);
+        KeyExpansion256(192, 208)
+        xmm_2 = _mm_aeskeygenassist_si128((__m128i)xmm_3, 64);
+        // key expansion final
+        xmm_2 = _mm_shuffle_epi32(xmm_2, 0xff);
+        xmm_4 = (__m128i)_mm_load_ps((float const*)&xmm_1);
+        xmm_4 = _mm_slli_si128(xmm_4, 4);
+        xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4);
+        xmm_4 = _mm_slli_si128(xmm_4, 4);
+        xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4);
+        xmm_4 = _mm_slli_si128(xmm_4, 4);
+        xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_4);
+        xmm_1 = (__m128)_mm_xor_si128((__m128i)xmm_1, xmm_2);
+        _mm_storeu_ps((float*)(sched + 224), xmm_1);
     }
 #endif
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__) && defined(__x86_64__)
 #define EncryptAES256(sched) \
-    "pxor (%["#sched"]), %%xmm0 \n" \
-    "aesenc 16(%["#sched"]), %%xmm0 \n" \
-    "aesenc 32(%["#sched"]), %%xmm0 \n" \
-    "aesenc 48(%["#sched"]), %%xmm0 \n" \
-    "aesenc 64(%["#sched"]), %%xmm0 \n" \
-    "aesenc 80(%["#sched"]), %%xmm0 \n" \
-    "aesenc 96(%["#sched"]), %%xmm0 \n" \
-    "aesenc 112(%["#sched"]), %%xmm0 \n" \
-    "aesenc 128(%["#sched"]), %%xmm0 \n" \
-    "aesenc 144(%["#sched"]), %%xmm0 \n" \
-    "aesenc 160(%["#sched"]), %%xmm0 \n" \
-    "aesenc 176(%["#sched"]), %%xmm0 \n" \
-    "aesenc 192(%["#sched"]), %%xmm0 \n" \
-    "aesenc 208(%["#sched"]), %%xmm0 \n" \
-    "aesenclast 224(%["#sched"]), %%xmm0 \n"
+    xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, *(__m128i*)sched); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x10)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x20)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x30)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x40)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x50)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x60)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x70)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x80)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x90)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xa0)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xb0)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xc0)); \
+    xmm_0 = (__m128)_mm_aesenc_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xd0)); \
+    xmm_0 = (__m128)_mm_aesenclast_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xe0));
 #endif
     void ECBEncryption::Encrypt (const ChipherBlock * in, ChipherBlock * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                "movups (%[in]), %%xmm0 \n"
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            uint8_t *sched = GetKeySchedule();
             EncryptAES256(sched)
-                "movups %%xmm0, (%[out]) \n"
-                : : [sched]"r"(GetKeySchedule ()), [in]"r"(in), [out]"r"(out) : "%xmm0", "memory"
-            );
+            _mm_storeu_ps((float*)out, xmm_0);
         }
         else
 #endif
@@ -660,37 +658,34 @@ namespace crypto
         }
     }
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__) && defined(__x86_64__)
 #define DecryptAES256(sched) \
-    "pxor 224(%["#sched"]), %%xmm0 \n" \
-    "aesdec 208(%["#sched"]), %%xmm0 \n" \
-    "aesdec 192(%["#sched"]), %%xmm0 \n" \
-    "aesdec 176(%["#sched"]), %%xmm0 \n" \
-    "aesdec 160(%["#sched"]), %%xmm0 \n" \
-    "aesdec 144(%["#sched"]), %%xmm0 \n" \
-    "aesdec 128(%["#sched"]), %%xmm0 \n" \
-    "aesdec 112(%["#sched"]), %%xmm0 \n" \
-    "aesdec 96(%["#sched"]), %%xmm0 \n" \
-    "aesdec 80(%["#sched"]), %%xmm0 \n" \
-    "aesdec 64(%["#sched"]), %%xmm0 \n" \
-    "aesdec 48(%["#sched"]), %%xmm0 \n" \
-    "aesdec 32(%["#sched"]), %%xmm0 \n" \
-    "aesdec 16(%["#sched"]), %%xmm0 \n" \
-    "aesdeclast (%["#sched"]), %%xmm0 \n"
+    xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xe0)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xd0)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xc0)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xb0)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0xa0)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x90)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x80)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x70)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x60)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x50)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x40)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x30)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x20)); \
+    xmm_0 = (__m128)_mm_aesdec_si128((__m128i)xmm_0, *(__m128i*)(sched + 0x10)); \
+    xmm_0 = (__m128)_mm_aesdeclast_si128((__m128i)xmm_0, *(__m128i*)(sched));
 #endif
     void ECBDecryption::Decrypt (const ChipherBlock * in, ChipherBlock * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
        {
-            __asm__
-            (
-                "movups (%[in]), %%xmm0 \n"
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            uint8_t *sched = GetKeySchedule();
             DecryptAES256(sched)
-                "movups %%xmm0, (%[out]) \n"
-                : : [sched]"r"(GetKeySchedule ()), [in]"r"(in), [out]"r"(out) : "%xmm0", "memory"
-            );
+            _mm_storeu_ps((float*)out, xmm_0);
         }
         else
 #endif
@@ -699,16 +694,16 @@ namespace crypto
         }
     }
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__) && defined(__x86_64__)
 #define CallAESIMC(offset) \
-    "movaps "#offset"(%[shed]), %%xmm0 \n" \
-    "aesimc %%xmm0, %%xmm0 \n" \
-    "movaps %%xmm0, "#offset"(%[shed]) \n"
+    xmm_0 = _mm_load_ps((float const*)(sched + offset)); \
+    xmm_0 = (__m128)_mm_aesimc_si128((__m128i)xmm_0); \
+    _mm_store_ps((float*)(sched + offset), xmm_0);
 #endif
     void ECBEncryption::SetKey (const AESKey& key)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
            ExpandKey (key);
@@ -722,13 +717,13 @@ namespace crypto
     void ECBDecryption::SetKey (const AESKey& key)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
            ExpandKey (key); // expand encryption key first
            // then invert it using aesimc
-            __asm__
-            (
+            uint8_t *sched = GetKeySchedule();
+            __m128 xmm_0;
             CallAESIMC(16)
             CallAESIMC(32)
             CallAESIMC(48)
@@ -742,8 +737,6 @@ namespace crypto
             CallAESIMC(176)
             CallAESIMC(192)
             CallAESIMC(208)
-            : : [shed]"r"(GetKeySchedule ()) : "%xmm0", "memory"
-            );
         }
         else
 #endif
@@ -754,28 +747,22 @@ namespace crypto
     void CBCEncryption::Encrypt (int numBlocks, const ChipherBlock * in, ChipherBlock * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                "movups (%[iv]), %%xmm1 \n"
-                "1: \n"
-                "movups (%[in]), %%xmm0 \n"
-                "pxor %%xmm1, %%xmm0 \n"
+            __m128 xmm_1 = _mm_loadu_ps((float const*)&m_LastBlock);
+            uint8_t *sched = m_ECBEncryption.GetKeySchedule();
+            __m128 xmm_0;
+            for (int i = 0; i < numBlocks; i++) {
+                xmm_0 = _mm_loadu_ps((float const*)in);
+                xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
                 EncryptAES256(sched)
-                "movaps %%xmm0, %%xmm1 \n"
-                "movups %%xmm0, (%[out]) \n"
-                "add $16, %[in] \n"
-                "add $16, %[out] \n"
-                "dec %[num] \n"
-                "jnz 1b \n"
-                "movups %%xmm1, (%[iv]) \n"
-                :
-                : [iv]"r"((uint8_t *)m_LastBlock), [sched]"r"(m_ECBEncryption.GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out), [num]"r"(numBlocks)
-                : "%xmm0", "%xmm1", "cc", "memory"
-            );
+                xmm_1 = _mm_load_ps((float const*)&xmm_0);
+                _mm_storeu_ps((float *)out, xmm_0);
+                in = (ChipherBlock const*)((uint8_t const*)in + 16);
+                out = (ChipherBlock *)((uint8_t *)out + 16);
+            }
+            _mm_storeu_ps((float*)&m_LastBlock, xmm_1);
         }
         else
 #endif
@@ -799,22 +786,16 @@ namespace crypto
     void CBCEncryption::Encrypt (const uint8_t * in, uint8_t * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                "movups (%[iv]), %%xmm1 \n"
-                "movups (%[in]), %%xmm0 \n"
-                "pxor %%xmm1, %%xmm0 \n"
+            __m128 xmm_1 = _mm_loadu_ps((float const*)&m_LastBlock);
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
+            uint8_t *sched = m_ECBEncryption.GetKeySchedule();
             EncryptAES256(sched)
-                "movups %%xmm0, (%[out]) \n"
-                "movups %%xmm0, (%[iv]) \n"
-                :
-                : [iv]"r"((uint8_t *)m_LastBlock), [sched]"r"(m_ECBEncryption.GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out)
-                : "%xmm0", "%xmm1", "memory"
-            );
+            _mm_storeu_ps((float *)out, xmm_0);
+            _mm_storeu_ps((float *)&m_LastBlock, xmm_0);
         }
         else
 #endif
@@ -823,29 +804,23 @@ namespace crypto
     void CBCDecryption::Decrypt (int numBlocks, const ChipherBlock * in, ChipherBlock * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                "movups (%[iv]), %%xmm1 \n"
-                "1: \n"
-                "movups (%[in]), %%xmm0 \n"
-                "movaps %%xmm0, %%xmm2 \n"
-                DecryptAES256(sched)
-                "pxor %%xmm1, %%xmm0 \n"
-                "movups %%xmm0, (%[out]) \n"
-                "movaps %%xmm2, %%xmm1 \n"
-                "add $16, %[in] \n"
-                "add $16, %[out] \n"
-                "dec %[num] \n"
-                "jnz 1b \n"
-                "movups %%xmm1, (%[iv]) \n"
-                :
-                : [iv]"r"((uint8_t *)m_IV), [sched]"r"(m_ECBDecryption.GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out), [num]"r"(numBlocks)
-                : "%xmm0", "%xmm1", "%xmm2", "cc", "memory"
-            );
+            __m128 xmm_1 = _mm_loadu_ps((float const*)&m_IV);
+            __m128 xmm_0, xmm_2;
+            uint8_t *sched = m_ECBDecryption.GetKeySchedule();
+            for (int i = 0; i < numBlocks; i++) {
+                xmm_0 = _mm_loadu_ps((float const*)in);
+                xmm_2 = _mm_load_ps((float const*)&xmm_0);
+                DecryptAES256(sched);
+                xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
+                _mm_storeu_ps((float*)out, xmm_0);
+                xmm_1 = _mm_load_ps((float const*)&xmm_2);
+                in = (ChipherBlock const*)((uint8_t const*)in + 16);
+                out = (ChipherBlock *)((uint8_t *)out + 16);
+            }
+            _mm_storeu_ps((float*)&m_IV, xmm_1);
         }
         else
 #endif
@@ -869,22 +844,16 @@ namespace crypto
     void CBCDecryption::Decrypt (const uint8_t * in, uint8_t * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                "movups (%[iv]), %%xmm1 \n"
-                "movups (%[in]), %%xmm0 \n"
-                "movups %%xmm0, (%[iv]) \n"
+            __m128 xmm_1 = _mm_loadu_ps((float const*)&m_IV);
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            _mm_storeu_ps((float*)&m_IV, xmm_0);
+            uint8_t *sched = m_ECBDecryption.GetKeySchedule();
             DecryptAES256(sched)
-                "pxor %%xmm1, %%xmm0 \n"
-                "movups %%xmm0, (%[out]) \n"
-                :
-                : [iv]"r"((uint8_t *)m_IV), [sched]"r"(m_ECBDecryption.GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out)
-                : "%xmm0", "%xmm1", "memory"
-            );
+            xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
+            _mm_storeu_ps((float*)out, xmm_0);
         }
         else
 #endif
@@ -893,34 +862,24 @@ namespace crypto
     void TunnelEncryption::Encrypt (const uint8_t * in, uint8_t * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                // encrypt IV
-                "movups (%[in]), %%xmm0 \n"
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            uint8_t *sched_iv = m_IVEncryption.GetKeySchedule(),
+                *sched_l = m_LayerEncryption.ECB().GetKeySchedule();
             EncryptAES256(sched_iv)
-                "movaps %%xmm0, %%xmm1 \n"
-                // double IV encryption
+            __m128 xmm_1 = _mm_load_ps((float const*)&xmm_0);
             EncryptAES256(sched_iv)
-                "movups %%xmm0, (%[out]) \n"
-                // encrypt data, IV is xmm1
-                "1: \n"
-                "add $16, %[in] \n"
-                "add $16, %[out] \n"
-                "movups (%[in]), %%xmm0 \n"
-                "pxor %%xmm1, %%xmm0 \n"
+            _mm_storeu_ps((float*)out, xmm_0);
+            for (int i = 0; i < 63 /* blocks = 1008 bytes */; i++) {
+                in += 16, out += 16;
+                xmm_0 = _mm_loadu_ps((float const*)in);
+                xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
                 EncryptAES256(sched_l)
-                "movaps %%xmm0, %%xmm1 \n"
-                "movups %%xmm0, (%[out]) \n"
-                "dec %[num] \n"
-                "jnz 1b \n"
-                :
-                : [sched_iv]"r"(m_IVEncryption.GetKeySchedule ()), [sched_l]"r"(m_LayerEncryption.ECB().GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out), [num]"r"(63) // 63 blocks = 1008 bytes
-                : "%xmm0", "%xmm1", "cc", "memory"
-            );
+                xmm_1 = _mm_load_ps((float const*)&xmm_0);
+                _mm_storeu_ps((float*)out, xmm_0);
+            }
         }
         else
 #endif
@@ -934,35 +893,26 @@ namespace crypto
     void TunnelDecryption::Decrypt (const uint8_t * in, uint8_t * out)
     {
-#if defined(__AES__) && (defined(__x86_64__) || defined(__i386__))
+#if defined(__AES__)
         if(i2p::cpu::aesni)
         {
-            __asm__
-            (
-                // decrypt IV
-                "movups (%[in]), %%xmm0 \n"
+            __m128 xmm_0 = _mm_loadu_ps((float const*)in);
+            uint8_t *sched_iv = m_IVDecryption.GetKeySchedule(),
+                *sched_l = m_LayerDecryption.ECB().GetKeySchedule();
             DecryptAES256(sched_iv)
-                "movaps %%xmm0, %%xmm1 \n"
-                // double IV encryption
+            __m128 xmm_1 = _mm_load_ps((float const*)&xmm_0);
             DecryptAES256(sched_iv)
-                "movups %%xmm0, (%[out]) \n"
-                // decrypt data, IV is xmm1
-                "1: \n"
-                "add $16, %[in] \n"
-                "add $16, %[out] \n"
-                "movups (%[in]), %%xmm0 \n"
-                "movaps %%xmm0, %%xmm2 \n"
+            _mm_storeu_ps((float*)out, xmm_0);
+            __m128 xmm_2;
+            for (int i = 0; i < 63 /* blocks = 1008 bytes */; i++) {
+                in += 16, out += 16;
+                xmm_0 = _mm_loadu_ps((float const*)in);
+                _mm_store_ps((float*)&xmm_2, xmm_0);
                 DecryptAES256(sched_l)
-                "pxor %%xmm1, %%xmm0 \n"
-                "movups %%xmm0, (%[out]) \n"
-                "movaps %%xmm2, %%xmm1 \n"
-                "dec %[num] \n"
-                "jnz 1b \n"
-                :
-                : [sched_iv]"r"(m_IVDecryption.GetKeySchedule ()), [sched_l]"r"(m_LayerDecryption.ECB().GetKeySchedule ()),
-                [in]"r"(in), [out]"r"(out), [num]"r"(63) // 63 blocks = 1008 bytes
-                : "%xmm0", "%xmm1", "%xmm2", "cc", "memory"
-            );
+                xmm_0 = (__m128)_mm_xor_si128((__m128i)xmm_0, (__m128i)xmm_1);
+                _mm_storeu_ps((float*)out, xmm_0);
+                xmm_1 = _mm_load_ps((float const*)&xmm_2);
+            }
         }
         else
 #endif
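Taken together, the EncryptAES256/DecryptAES256 macros above are just an unrolled AES-256 round loop over the fifteen 16-byte round keys (240 bytes) that ExpandKey lays out; the old movaps/aesenc memory operands already required that schedule to be 16-byte aligned. A standalone, illustrative equivalent of the single-block encrypt path (hypothetical names, not taken from the diff):

#include <immintrin.h>
#include <stdint.h>

// Illustrative equivalent of the unrolled EncryptAES256 macro: 'sched' holds
// 15 consecutive 16-byte AES-256 round keys, 16-byte aligned.
static void EncryptBlockAES256 (const uint8_t * sched, const uint8_t * in, uint8_t * out)
{
    __m128i block = _mm_loadu_si128 ((const __m128i *)in);
    block = _mm_xor_si128 (block, _mm_load_si128 ((const __m128i *)sched)); // round 0
    for (int round = 1; round <= 13; round++) // rounds 1..13
        block = _mm_aesenc_si128 (block, _mm_load_si128 ((const __m128i *)(sched + 16 * round)));
    block = _mm_aesenclast_si128 (block, _mm_load_si128 ((const __m128i *)(sched + 16 * 14))); // round 14
    _mm_storeu_si128 ((__m128i *)out, block);
}

The decrypt macro mirrors this: it walks the schedule backwards with _mm_aesdec_si128 and finishes with _mm_aesdeclast_si128 on round key 0, which is why ECBDecryption::SetKey first runs the middle round keys through _mm_aesimc_si128 via CallAESIMC.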


@@ -11,6 +11,9 @@
 #include "Log.h"
 #include "Timestamp.h"
 #include "Identity.h"
+#ifdef __AVX__
+#include <immintrin.h>
+#endif
 
 namespace i2p
 {
@@ -803,19 +806,13 @@ namespace data
     XORMetric operator^(const IdentHash& key1, const IdentHash& key2)
     {
         XORMetric m;
-#if (defined(__x86_64__) || defined(__i386__)) && defined(__AVX__) // not all X86 targets supports AVX (like old Pentium, see #1600)
+#if defined(__AVX__) // not all X86 targets supports AVX (like old Pentium, see #1600)
         if(i2p::cpu::avx)
         {
-            __asm__
-            (
-                "vmovups %1, %%ymm0 \n"
-                "vmovups %2, %%ymm1 \n"
-                "vxorps %%ymm0, %%ymm1, %%ymm1 \n"
-                "vmovups %%ymm1, %0 \n"
-                : "=m"(*m.metric)
-                : "m"(*key1), "m"(*key2)
-                : "memory", "%xmm0", "%xmm1" // should be replaced by %ymm0/1 once supported by compiler
-            );
+            __m256 ymm_0 = _mm256_loadu_ps((float const*)&key1);
+            __m256 ymm_1 = _mm256_loadu_ps((float const*)&key2);
+            ymm_1 = _mm256_xor_ps(ymm_1, ymm_0);
+            _mm256_storeu_ps((float*)m.metric, ymm_1);
         }
         else
 #endif
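One detail in the XORMetric hunk: the 32-byte hashes are XORed in the float domain because _mm256_xor_ps is available in baseline AVX, while the integer form _mm256_xor_si256 requires AVX2; the result is bitwise identical either way. A small self-contained illustration, not taken from the diff (build with -mavx):

#include <immintrin.h>
#include <stdint.h>
#include <cstdio>

// XOR two 32-byte hashes the way the new AVX branch does.
static void XorHash32 (const uint8_t * a, const uint8_t * b, uint8_t * out)
{
    __m256 x = _mm256_loadu_ps ((const float *)a);
    __m256 y = _mm256_loadu_ps ((const float *)b);
    _mm256_storeu_ps ((float *)out, _mm256_xor_ps (x, y));
}

int main ()
{
    uint8_t a[32], b[32], m[32];
    for (int i = 0; i < 32; i++) { a[i] = (uint8_t)i; b[i] = (uint8_t)(0xFF - i); }
    XorHash32 (a, b, m);
    for (int i = 0; i < 32; i++) printf ("%02x", m[i]); // prints "ff" 32 times
    printf ("\n");
    return 0;
}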