@@ -35,7 +35,7 @@
 #include <stdio.h>
 #include <unistd.h>

-#include "int-util.h"
+#include "common/int-util.h"
 #include "hash-ops.h"
 #include "oaes_lib.h"
 #include "variant2_int_sqrt.h"
@@ -117,48 +117,74 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
 #define VARIANT2_SHUFFLE_ADD_SSE2(base_ptr, offset) \
   do if (variant >= 2) \
   { \
-    const __m128i chunk1 = _mm_load_si128((__m128i *)((base_ptr) + ((offset) ^ 0x10))); \
+    __m128i chunk1 = _mm_load_si128((__m128i *)((base_ptr) + ((offset) ^ 0x10))); \
     const __m128i chunk2 = _mm_load_si128((__m128i *)((base_ptr) + ((offset) ^ 0x20))); \
     const __m128i chunk3 = _mm_load_si128((__m128i *)((base_ptr) + ((offset) ^ 0x30))); \
     _mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x10)), _mm_add_epi64(chunk3, _b1)); \
     _mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x20)), _mm_add_epi64(chunk1, _b)); \
     _mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x30)), _mm_add_epi64(chunk2, _a)); \
+    if (variant >= 4) \
+    { \
+      chunk1 = _mm_xor_si128(chunk1, chunk2); \
+      _c = _mm_xor_si128(_c, chunk3); \
+      _c = _mm_xor_si128(_c, chunk1); \
+    } \
   } while (0)

 #define VARIANT2_SHUFFLE_ADD_NEON(base_ptr, offset) \
   do if (variant >= 2) \
   { \
-    const uint64x2_t chunk1 = vld1q_u64(U64((base_ptr) + ((offset) ^ 0x10))); \
+    uint64x2_t chunk1 = vld1q_u64(U64((base_ptr) + ((offset) ^ 0x10))); \
     const uint64x2_t chunk2 = vld1q_u64(U64((base_ptr) + ((offset) ^ 0x20))); \
     const uint64x2_t chunk3 = vld1q_u64(U64((base_ptr) + ((offset) ^ 0x30))); \
     vst1q_u64(U64((base_ptr) + ((offset) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(_b1))); \
     vst1q_u64(U64((base_ptr) + ((offset) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(_b))); \
     vst1q_u64(U64((base_ptr) + ((offset) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(_a))); \
+    if (variant >= 4) \
+    { \
+      chunk1 = veorq_u64(chunk1, chunk2); \
+      _c = vreinterpretq_u8_u64(veorq_u64(vreinterpretq_u64_u8(_c), chunk3)); \
+      _c = vreinterpretq_u8_u64(veorq_u64(vreinterpretq_u64_u8(_c), chunk1)); \
+    } \
   } while (0)

-#define VARIANT2_PORTABLE_SHUFFLE_ADD(base_ptr, offset) \
+#define VARIANT2_PORTABLE_SHUFFLE_ADD(out, a_, base_ptr, offset) \
   do if (variant >= 2) \
   { \
     uint64_t* chunk1 = U64((base_ptr) + ((offset) ^ 0x10)); \
     uint64_t* chunk2 = U64((base_ptr) + ((offset) ^ 0x20)); \
     uint64_t* chunk3 = U64((base_ptr) + ((offset) ^ 0x30)); \
     \
-    const uint64_t chunk1_old[2] = { chunk1[0], chunk1[1] }; \
+    uint64_t chunk1_old[2] = { SWAP64LE(chunk1[0]), SWAP64LE(chunk1[1]) }; \
+    const uint64_t chunk2_old[2] = { SWAP64LE(chunk2[0]), SWAP64LE(chunk2[1]) }; \
+    const uint64_t chunk3_old[2] = { SWAP64LE(chunk3[0]), SWAP64LE(chunk3[1]) }; \
     \
     uint64_t b1[2]; \
     memcpy_swap64le(b1, b + 16, 2); \
-    chunk1[0] = SWAP64LE(SWAP64LE(chunk3[0]) + b1[0]); \
-    chunk1[1] = SWAP64LE(SWAP64LE(chunk3[1]) + b1[1]); \
+    chunk1[0] = SWAP64LE(chunk3_old[0] + b1[0]); \
+    chunk1[1] = SWAP64LE(chunk3_old[1] + b1[1]); \
     \
     uint64_t a0[2]; \
-    memcpy_swap64le(a0, a, 2); \
-    chunk3[0] = SWAP64LE(SWAP64LE(chunk2[0]) + a0[0]); \
-    chunk3[1] = SWAP64LE(SWAP64LE(chunk2[1]) + a0[1]); \
+    memcpy_swap64le(a0, a_, 2); \
+    chunk3[0] = SWAP64LE(chunk2_old[0] + a0[0]); \
+    chunk3[1] = SWAP64LE(chunk2_old[1] + a0[1]); \
     \
     uint64_t b0[2]; \
     memcpy_swap64le(b0, b, 2); \
-    chunk2[0] = SWAP64LE(SWAP64LE(chunk1_old[0]) + b0[0]); \
-    chunk2[1] = SWAP64LE(SWAP64LE(chunk1_old[1]) + b0[1]); \
+    chunk2[0] = SWAP64LE(chunk1_old[0] + b0[0]); \
+    chunk2[1] = SWAP64LE(chunk1_old[1] + b0[1]); \
+    if (variant >= 4) \
+    { \
+      uint64_t out_copy[2]; \
+      memcpy_swap64le(out_copy, out, 2); \
+      chunk1_old[0] ^= chunk2_old[0]; \
+      chunk1_old[1] ^= chunk2_old[1]; \
+      out_copy[0] ^= chunk3_old[0]; \
+      out_copy[1] ^= chunk3_old[1]; \
+      out_copy[0] ^= chunk1_old[0]; \
+      out_copy[1] ^= chunk1_old[1]; \
+      memcpy_swap64le(out, out_copy, 2); \
+    } \
   } while (0)

 #define VARIANT2_INTEGER_MATH_DIVISION_STEP(b, ptr) \
@@ -201,13 +227,13 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
 #endif

 #define VARIANT2_2_PORTABLE() \
-    if (variant >= 2) { \
+    if (variant == 2 || variant == 3) { \
       xor_blocks(long_state + (j ^ 0x10), d); \
       xor_blocks(d, long_state + (j ^ 0x20)); \
     }

 #define VARIANT2_2() \
-  do if (variant >= 2) \
+  do if (variant == 2 || variant == 3) \
   { \
     *U64(hp_state + (j ^ 0x10)) ^= SWAP64LE(hi); \
     *(U64(hp_state + (j ^ 0x10)) + 1) ^= SWAP64LE(lo); \
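Note: both VARIANT2_2 macros are retightened from "variant >= 2" to "variant == 2 || variant == 3", so variant 2's multiply-feedback step stops firing once the random-math variant takes over, while the shuffle macros above keep running for variant >= 4. The effective gating, sketched as hypothetical helpers (illustration only, not in the patch):

    static int wants_shuffle(int variant)      { return variant >= 2; }
    static int wants_mul_feedback(int variant) { return variant == 2 || variant == 3; }
    static int wants_random_math(int variant)  { return variant >= 4; }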
@@ -237,15 +263,15 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
 #define VARIANT4_RANDOM_MATH(a, b, r, _b, _b1) \
   do if (variant >= 4) \
   { \
-    uint64_t t; \
-    memcpy(&t, b, sizeof(uint64_t)); \
+    uint64_t t[2]; \
+    memcpy(t, b, sizeof(uint64_t)); \
     \
     if (sizeof(v4_reg) == sizeof(uint32_t)) \
-      t ^= SWAP64LE((r[0] + r[1]) | ((uint64_t)(r[2] + r[3]) << 32)); \
+      t[0] ^= SWAP64LE((r[0] + r[1]) | ((uint64_t)(r[2] + r[3]) << 32)); \
     else \
-      t ^= SWAP64LE((r[0] + r[1]) ^ (r[2] + r[3])); \
+      t[0] ^= SWAP64LE((r[0] + r[1]) ^ (r[2] + r[3])); \
     \
-    memcpy(b, &t, sizeof(uint64_t)); \
+    memcpy(b, t, sizeof(uint64_t)); \
     \
     V4_REG_LOAD(r + 4, a); \
     V4_REG_LOAD(r + 5, (uint64_t*)(a) + 1); \
@@ -254,6 +280,17 @@ extern void aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *ex
     V4_REG_LOAD(r + 8, (uint64_t*)(_b1) + 1); \
     \
     v4_random_math(code, r); \
+    \
+    memcpy(t, a, sizeof(uint64_t) * 2); \
+    \
+    if (sizeof(v4_reg) == sizeof(uint32_t)) { \
+      t[0] ^= SWAP64LE(r[2] | ((uint64_t)(r[3]) << 32)); \
+      t[1] ^= SWAP64LE(r[0] | ((uint64_t)(r[1]) << 32)); \
+    } else { \
+      t[0] ^= SWAP64LE(r[2] ^ r[3]); \
+      t[1] ^= SWAP64LE(r[0] ^ r[1]); \
+    } \
+    memcpy(a, t, sizeof(uint64_t) * 2); \
   } while (0)

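Note: widening t from a single uint64_t to t[2] is what lets the new tail above reuse it for a full 16-byte mix. A minimal sketch of that tail's net effect for the 32-bit v4_reg case (editorial, assuming the file's SWAP64LE; the 64-bit case XORs r[2] ^ r[3] and r[0] ^ r[1] instead):

    /* After v4_random_math(code, r) runs, registers r[0..3] are folded
       back into all 16 bytes of a, not only into the low 8 bytes of b
       as in the pre-change macro. */
    static void v4_fold_registers_into_a(uint64_t a[2], const uint32_t r[4])
    {
      a[0] ^= SWAP64LE(r[2] | ((uint64_t)r[3] << 32));
      a[1] ^= SWAP64LE(r[0] | ((uint64_t)r[1] << 32));
    }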
@@ -1328,6 +1365,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
 {
   uint8_t text[INIT_SIZE_BYTE];
   uint8_t a[AES_BLOCK_SIZE];
+  uint8_t a1[AES_BLOCK_SIZE];
   uint8_t b[AES_BLOCK_SIZE * 2];
   uint8_t c[AES_BLOCK_SIZE];
   uint8_t c1[AES_BLOCK_SIZE];
@@ -1387,10 +1425,10 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
     // Iteration 1
     j = state_index(a);
     p = &long_state[j];
-    aesb_single_round(p, p, a);
-    copy_block(c1, p);
+    aesb_single_round(p, c1, a);

-    VARIANT2_PORTABLE_SHUFFLE_ADD(long_state, j);
+    VARIANT2_PORTABLE_SHUFFLE_ADD(c1, a, long_state, j);
+    copy_block(p, c1);
     xor_blocks(p, b);
     VARIANT1_1(p);

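Note on the reordering above: the AES round result must now pass through the shuffle before it reaches the scratchpad, because for variant >= 4 the shuffle's new out parameter can modify that result. Annotated restatement (editorial; assumes the file's aesb_single_round(in, out, expandedKey) convention):

    aesb_single_round(p, c1, a);                         /* c1 = AES round of the scratchpad block */
    VARIANT2_PORTABLE_SHUFFLE_ADD(c1, a, long_state, j); /* variant >= 4 folds sibling chunks into c1 */
    copy_block(p, c1);                                   /* only now is the result stored back */
    xor_blocks(p, b);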
@@ -1399,14 +1437,15 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
     p = &long_state[j];
     copy_block(c, p);

+    copy_block(a1, a);
     VARIANT2_PORTABLE_INTEGER_MATH(c, c1);
-    VARIANT4_RANDOM_MATH(a, c, r, b, b + AES_BLOCK_SIZE);
+    VARIANT4_RANDOM_MATH(a1, c, r, b, b + AES_BLOCK_SIZE);
     mul(c1, c, d);
     VARIANT2_2_PORTABLE();
-    VARIANT2_PORTABLE_SHUFFLE_ADD(long_state, j);
-    sum_half_blocks(a, d);
-    swap_blocks(a, c);
-    xor_blocks(a, c);
+    VARIANT2_PORTABLE_SHUFFLE_ADD(c1, a, long_state, j);
+    sum_half_blocks(a1, d);
+    swap_blocks(a1, c);
+    xor_blocks(a1, c);
     VARIANT1_2(U64(c) + 1);
     copy_block(p, c);

@@ -1414,6 +1453,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
       copy_block(b + AES_BLOCK_SIZE, b);
     }
     copy_block(b, c1);
+    copy_block(a, a1);
   }

   memcpy(text, state.init, INIT_SIZE_BYTE);
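Note: the new a1 buffer snapshots a at the start of iteration 2, so the random math and half-block arithmetic can build the next key in a1 while a itself stays unchanged for the shuffle's a_ argument; the loop tail then commits it. Sketch of the register rotation at the end of each iteration (editorial; the b1 copy runs under if (variant >= 2)):

    copy_block(a1, a);                  /* snapshot: VARIANT4_RANDOM_MATH and sum_half_blocks update a1 */
    /* ... iteration body ... */
    copy_block(b + AES_BLOCK_SIZE, b);  /* b1 <- previous b */
    copy_block(b, c1);                  /* b  <- this iteration's AES output */
    copy_block(a, a1);                  /* a  <- updated key for the next iteration */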
@@ -1534,6 +1574,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
   union cn_slow_hash_state state;
   uint8_t text[INIT_SIZE_BYTE];
   uint8_t a[AES_BLOCK_SIZE];
+  uint8_t a1[AES_BLOCK_SIZE];
   uint8_t b[AES_BLOCK_SIZE * 2];
   uint8_t c1[AES_BLOCK_SIZE];
   uint8_t c2[AES_BLOCK_SIZE];
@@ -1577,7 +1618,7 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
     j = e2i(a, MEMORY / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
     copy_block(c1, &long_state[j]);
     aesb_single_round(c1, c1, a);
-    VARIANT2_PORTABLE_SHUFFLE_ADD(long_state, j);
+    VARIANT2_PORTABLE_SHUFFLE_ADD(c1, a, long_state, j);
     copy_block(&long_state[j], c1);
     xor_blocks(&long_state[j], b);
     assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE) * AES_BLOCK_SIZE);
@@ -1585,23 +1626,22 @@ void cn_slow_hash(const void *data, size_t length, char *hash, int variant, int
     /* Iteration 2 */
     j = e2i(c1, MEMORY / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
     copy_block(c2, &long_state[j]);
+    copy_block(a1, a);
     VARIANT2_PORTABLE_INTEGER_MATH(c2, c1);
-    VARIANT4_RANDOM_MATH(a, c2, r, b, b + AES_BLOCK_SIZE);
+    VARIANT4_RANDOM_MATH(a1, c2, r, b, b + AES_BLOCK_SIZE);
     mul(c1, c2, d);
     VARIANT2_2_PORTABLE();
-    VARIANT2_PORTABLE_SHUFFLE_ADD(long_state, j);
-    swap_blocks(a, c1);
-    sum_half_blocks(c1, d);
-    swap_blocks(c1, c2);
-    xor_blocks(c1, c2);
+    VARIANT2_PORTABLE_SHUFFLE_ADD(c1, a, long_state, j);
+    sum_half_blocks(a1, d);
+    swap_blocks(a1, c2);
+    xor_blocks(a1, c2);
     VARIANT1_2(c2 + 8);
     copy_block(&long_state[j], c2);
     assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE) * AES_BLOCK_SIZE);
     if (variant >= 2) {
       copy_block(b + AES_BLOCK_SIZE, b);
     }
-    copy_block(b, a);
-    copy_block(a, c1);
+    copy_block(b, c1);
+    copy_block(a, a1);
   }

   memcpy(text, state.init, INIT_SIZE_BYTE);