# include "hash-ops.h"
# include "hash-ops.h"
# include "oaes_lib.h"
# include "oaes_lib.h"
static void ( * const extra_hashes [ 4 ] ) ( const void * , size_t , char * ) = {
# include <emmintrin.h>
hash_extra_blake , hash_extra_groestl , hash_extra_jh , hash_extra_skein
} ;
#if defined(_MSC_VER)
#include <intrin.h>
#include <Windows.h>
#define STATIC
#define INLINE __inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __declspec(align(16))
#endif
#else
#include <wmmintrin.h>
#include <sys/mman.h>
#define STATIC static
#define INLINE inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
#endif
#endif
#if defined(__INTEL_COMPILER)
#define ASM __asm__
#elif !defined(_MSC_VER)
#define ASM __asm__
#else
#define ASM __asm
#endif

#define MEMORY          (1 << 21) /* 2 MiB scratchpad */
#define ITER            (1 << 20)
#define AES_BLOCK_SIZE  16
#define AES_KEY_SIZE    32
#define INIT_SIZE_BLK   8
#define INIT_SIZE_BYTE  (INIT_SIZE_BLK * AES_BLOCK_SIZE)

#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
#define U64(x)  ((uint64_t *) (x))
#define R128(x) ((__m128i *) (x))
#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a)))

#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
#if defined(_MSC_VER)
#define __mul() lo = _umul128(c[0], b[0], &hi);
#else
#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc");
#endif
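
/*
 * state_index() maps the low 64 bits of an AES block to a 16-byte-aligned
 * offset in the scratchpad: bits 4..20 select one of the TOTALBLOCKS
 * 16-byte blocks. __mul() is the 64x64 -> 128-bit multiply used by the
 * main loop (_umul128 on MSVC, an inline mulq elsewhere), writing the
 * product into the locals hi and lo.
 */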
#define pre_aes() \
    j = state_index(a); \
    _c = _mm_load_si128(R128(&hp_state[j])); \
    _a = _mm_load_si128(R128(a));

/* dga's optimized scratchpad twiddling */
#define post_aes() \
    _mm_store_si128(R128(c), _c); \
    _b = _mm_xor_si128(_b, _c); \
    _mm_store_si128(R128(&hp_state[j]), _b); \
    j = state_index(c); \
    p = U64(&hp_state[j]); \
    b[0] = p[0]; b[1] = p[1]; \
    __mul(); \
    a[0] += hi; a[1] += lo; \
    p = U64(&hp_state[j]); \
    p[0] = a[0]; p[1] = a[1]; \
    a[0] ^= b[0]; a[1] ^= b[1]; \
    _b = _c;
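
/*
 * pre_aes()/post_aes() expand inside cn_slow_hash() and rely on its locals:
 * a, b, c (16-byte blocks), _a, _b, _c (their SSE copies), j (the current
 * scratchpad offset), p (a scratchpad pointer) and hi/lo (the __mul result).
 */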
#if defined(_MSC_VER)
#define THREADV __declspec(thread)
#else
#define THREADV __thread
#endif

extern int aesb_single_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
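
/*
 * The two externs above are the software-AES fallbacks, defined in a
 * separate translation unit and used on CPUs without AES-NI:
 * aesb_single_round() performs one AES round, aesb_pseudo_round() the
 * ten-round sequence used for scratchpad initialization and finalization.
 */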
#pragma pack(push, 1)
union cn_slow_hash_state
{
    union hash_state hs;
    struct
    {
        uint8_t k[64];
        uint8_t init[INIT_SIZE_BYTE];
    };
};
#pragma pack(pop)

THREADV uint8_t *hp_state = NULL;
THREADV int hp_allocated = 0;
#if defined(_MSC_VER)
#define cpuid(info,x)    __cpuidex(info,x,0)
#else
void cpuid(int CPUInfo[4], int InfoType)
{
    ASM __volatile__
    (
    "cpuid":
        "=a" (CPUInfo[0]),
        "=b" (CPUInfo[1]),
        "=c" (CPUInfo[2]),
        "=d" (CPUInfo[3]) :
        "a" (InfoType), "c" (0)
    );
}
#endif
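
/* CPUID leaf 1 reports AES-NI support in bit 25 of ECX; tested below. */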
STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
    U64(a)[0] ^= U64(b)[0];
    U64(a)[1] ^= U64(b)[1];
}

STATIC INLINE int check_aes_hw(void)
{
    int cpuid_results[4];
    static int supported = -1;

    if(supported >= 0)
        return supported;

    cpuid(cpuid_results, 1);
    return supported = cpuid_results[2] & (1 << 25);
}
STATIC INLINE void aes_256_assist1(__m128i *t1, __m128i *t2)
{
    __m128i t4;
    *t2 = _mm_shuffle_epi32(*t2, 0xff);
    t4 = _mm_slli_si128(*t1, 0x04);
    *t1 = _mm_xor_si128(*t1, t4);
    t4 = _mm_slli_si128(t4, 0x04);
    *t1 = _mm_xor_si128(*t1, t4);
    t4 = _mm_slli_si128(t4, 0x04);
    *t1 = _mm_xor_si128(*t1, t4);
    *t1 = _mm_xor_si128(*t1, *t2);
}
STATIC INLINE void aes_256_assist2(__m128i *t1, __m128i *t3)
{
    __m128i t2, t4;
    t4 = _mm_aeskeygenassist_si128(*t1, 0x00);
    t2 = _mm_shuffle_epi32(t4, 0xaa);
    t4 = _mm_slli_si128(*t3, 0x04);
    *t3 = _mm_xor_si128(*t3, t4);
    t4 = _mm_slli_si128(t4, 0x04);
    *t3 = _mm_xor_si128(*t3, t4);
    t4 = _mm_slli_si128(t4, 0x04);
    *t3 = _mm_xor_si128(*t3, t4);
    *t3 = _mm_xor_si128(*t3, t2);
}
STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey)
{
    __m128i *ek = R128(expandedKey);
    __m128i t1, t2, t3;

    t1 = _mm_loadu_si128(R128(key));
    t3 = _mm_loadu_si128(R128(key + 16));

    ek[0] = t1;
    ek[1] = t3;

    t2 = _mm_aeskeygenassist_si128(t3, 0x01);
    aes_256_assist1(&t1, &t2);
    ek[2] = t1;
    aes_256_assist2(&t1, &t3);
    ek[3] = t3;

    t2 = _mm_aeskeygenassist_si128(t3, 0x02);
    aes_256_assist1(&t1, &t2);
    ek[4] = t1;
    aes_256_assist2(&t1, &t3);
    ek[5] = t3;

    t2 = _mm_aeskeygenassist_si128(t3, 0x04);
    aes_256_assist1(&t1, &t2);
    ek[6] = t1;
    aes_256_assist2(&t1, &t3);
    ek[7] = t3;

    t2 = _mm_aeskeygenassist_si128(t3, 0x08);
    aes_256_assist1(&t1, &t2);
    ek[8] = t1;
    aes_256_assist2(&t1, &t3);
    ek[9] = t3;

    t2 = _mm_aeskeygenassist_si128(t3, 0x10);
    aes_256_assist1(&t1, &t2);
    ek[10] = t1;
}
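
/*
 * Note: only ek[0..10] are generated above. CryptoNight's "pseudo" AES runs
 * ten aesenc rounds with no initial whitening and no final round, so the
 * full 15-key AES-256 schedule is unnecessary, even though expandedKey is
 * sized for it (240 bytes).
 */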
STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out,
                                    const uint8_t *expandedKey, int nblocks)
{
    __m128i *k = R128(expandedKey);
    __m128i d;
    int i;

    for(i = 0; i < nblocks; i++)
    {
        d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
        d = _mm_aesenc_si128(d, *R128(&k[0]));
        d = _mm_aesenc_si128(d, *R128(&k[1]));
        d = _mm_aesenc_si128(d, *R128(&k[2]));
        d = _mm_aesenc_si128(d, *R128(&k[3]));
        d = _mm_aesenc_si128(d, *R128(&k[4]));
        d = _mm_aesenc_si128(d, *R128(&k[5]));
        d = _mm_aesenc_si128(d, *R128(&k[6]));
        d = _mm_aesenc_si128(d, *R128(&k[7]));
        d = _mm_aesenc_si128(d, *R128(&k[8]));
        d = _mm_aesenc_si128(d, *R128(&k[9]));
        _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
    }
}
STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out,
                                        const uint8_t *expandedKey, const uint8_t *xor, int nblocks)
{
    __m128i *k = R128(expandedKey);
    __m128i *x = R128(xor);
    __m128i d;
    int i;

    for(i = 0; i < nblocks; i++)
    {
        d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
        d = _mm_xor_si128(d, *R128(x++));
        d = _mm_aesenc_si128(d, *R128(&k[0]));
        d = _mm_aesenc_si128(d, *R128(&k[1]));
        d = _mm_aesenc_si128(d, *R128(&k[2]));
        d = _mm_aesenc_si128(d, *R128(&k[3]));
        d = _mm_aesenc_si128(d, *R128(&k[4]));
        d = _mm_aesenc_si128(d, *R128(&k[5]));
        d = _mm_aesenc_si128(d, *R128(&k[6]));
        d = _mm_aesenc_si128(d, *R128(&k[7]));
        d = _mm_aesenc_si128(d, *R128(&k[8]));
        d = _mm_aesenc_si128(d, *R128(&k[9]));
        _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
    }
}
#if defined(_MSC_VER)
BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable)
{
    struct
    {
        DWORD count;
        LUID_AND_ATTRIBUTES privilege[1];
    } info;

    HANDLE token;
    if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token))
        return FALSE;

    info.count = 1;
    info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0;

    if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid)))
        return FALSE;

    if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL))
        return FALSE;

    if(GetLastError() != ERROR_SUCCESS)
        return FALSE;

    CloseHandle(token);

    return TRUE;
}
#endif
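
/*
 * The scratchpad is backed by large pages where the OS allows it: on Windows,
 * VirtualAlloc with MEM_LARGE_PAGES needs SeLockMemoryPrivilege (enabled via
 * SetLockPagesPrivilege above); on Linux, mmap uses MAP_HUGETLB. Either path
 * falls back to a plain malloc() if the privileged allocation fails.
 */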
void slow_hash_allocate_state(void)
{
    if(hp_state != NULL)
        return;

#if defined(_MSC_VER)
    SetLockPagesPrivilege(GetCurrentProcess(), TRUE);
    hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES |
                                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
    hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);
    if(hp_state == MAP_FAILED)
        hp_state = NULL;
#endif

    hp_allocated = 1;
    if(hp_state == NULL)
    {
        hp_allocated = 0;
        hp_state = (uint8_t *) malloc(MEMORY);
    }
}
void slow_hash_free_state(void)
{
    if(hp_state == NULL)
        return;

    if(!hp_allocated)
        free(hp_state);
    else
    {
#if defined(_MSC_VER)
        VirtualFree(hp_state, 0, MEM_RELEASE); /* size must be 0 with MEM_RELEASE */
#else
        munmap(hp_state, MEMORY);
#endif
    }

    hp_state = NULL;
    hp_allocated = 0;
}
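
/*
 * cn_slow_hash() implements CryptoNight:
 *   1. Keccak the input into the 200-byte state.
 *   2. Expand the state into the 2 MiB scratchpad with ten-round pseudo AES.
 *   3. Run ITER / 2 = 524288 mixing iterations; each reads a scratchpad
 *      block, applies an AES round plus a 64x64 multiply-add, and writes back.
 *   4. Fold the scratchpad back into the state via xor + pseudo AES.
 *   5. Permute the state and finish with one of four extra hashes (Blake-256,
 *      Groestl, JH, Skein) selected by the low two bits of the state.
 */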
void cn_slow_hash(const void *data, size_t length, char *hash)
{
    RDATA_ALIGN16 uint8_t expandedKey[240]; /* aligned for the SSE helpers */

    uint8_t text[INIT_SIZE_BYTE];
    RDATA_ALIGN16 uint64_t a[2];
    RDATA_ALIGN16 uint64_t b[2];
    RDATA_ALIGN16 uint64_t c[2];
    union cn_slow_hash_state state;
    __m128i _a, _b, _c;
    uint64_t hi, lo;
    size_t i, j;
    uint64_t *p = NULL;
    oaes_ctx *aes_ctx;
    int useAes = check_aes_hw();

    static void (*const extra_hashes[4])(const void *, size_t, char *) =
    {
        hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
    };

    // this isn't supposed to happen, but guard against it for now.
    if(hp_state == NULL)
        slow_hash_allocate_state();
    hash_process(&state.hs, data, length);
    memcpy(text, state.init, INIT_SIZE_BYTE);
    if(useAes)
    {
        aes_expand_key(state.hs.b, expandedKey);
        for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
        {
            aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK);
            memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
        }
    }
    else
    {
        aes_ctx = (oaes_ctx *) oaes_alloc();
        oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
        for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
        {
            for(j = 0; j < INIT_SIZE_BLK; j++)
                aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);

            memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
        }
    }
    U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
    U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
    U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
    U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];

    /* Dependency chain: address -> read value ------+
     * written value <-+ hard function (AES or MUL) <+
     * next address  <-+
     */
    _b = _mm_load_si128(R128(b));
    // this is ugly but the branching affects the loop somewhat so put it outside.
    if(useAes)
    {
        for(i = 0; i < ITER / 2; i++)
        {
            pre_aes();
            _c = _mm_aesenc_si128(_c, _a);
            // post_aes(), optimized scratchpad twiddling (credits to dga)
            post_aes();
        }
    }
    else
    {
        for(i = 0; i < ITER / 2; i++)
        {
            pre_aes();
            aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a);
            post_aes();
        }
    }
    memcpy(text, state.init, INIT_SIZE_BYTE);
    if(useAes)
    {
        aes_expand_key(&state.hs.b[32], expandedKey);
        for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
        {
            // add the xor to the pseudo round
            aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK);
        }
    }
    else
    {
        oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
        for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
        {
            for(j = 0; j < INIT_SIZE_BLK; j++)
            {
                xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
                aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
            }
        }
        oaes_free((OAES_CTX **) &aes_ctx);
    }
    memcpy(state.init, text, INIT_SIZE_BYTE);
    hash_permutation(&state.hs);
    extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
}
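
/*
 * Minimal usage sketch (illustrative only; the scratchpad calls are optional
 * since cn_slow_hash() allocates on demand, but pre-allocating amortizes the
 * cost across many hashes on the same thread). The output is 32 bytes,
 * produced by whichever extra hash the final state selects.
 *
 *   char hash[32];
 *   slow_hash_allocate_state();
 *   cn_slow_hash("some input", 10, hash);
 *   slow_hash_free_state();
 */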