/*
* Copyright (c) 2013, Alexey Degtyarev.
* All rights reserved.
*
* $Id: gost3411-2012-sse41.h 526 2013-05-26 18:24:29Z alexey $
*
* Modified by Michele Pes 2017-03-01
*/

#ifndef GOST_3411_2012_CORE_SSE41_H_
#define GOST_3411_2012_CORE_SSE41_H_

#if defined(__GOST3411_HAS_SSE2__) || !defined(__GOST3411_HAS_SSE41__)
#error "Configuration error: this header requires __GOST3411_HAS_SSE41__ and must not be combined with __GOST3411_HAS_SSE2__."
#endif
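
/* MMX, SSE2 and SSE4.1 intrinsic headers, in that order. */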
#include <mmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>
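
/*
 * On 32-bit x86 the 64-bit EXTRACT variant is unusable (_mm_insert_epi64
 * and 64-bit general-purpose registers are unavailable there), so fall
 * back to the MMX-based EXTRACT32.
 */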
#ifdef __i386__
#define EXTRACT EXTRACT32
#else
#define EXTRACT EXTRACT64
#endif
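
/*
 * ICC provides _mm_cvtsi64_m64/_mm_cvtm64_si64 as intrinsics; for other
 * compilers a plain cast between __m64 and long long is assumed to be
 * sufficient.
 */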
#ifndef __ICC
#define _mm_cvtsi64_m64(v) ((__m64) (v))
#define _mm_cvtm64_si64(v) ((long long) (v))
#endif
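
/* Load one 64-byte block P into four XMM registers (P must be 16-byte aligned). */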
#define LOAD(P, xmm0, xmm1, xmm2, xmm3) { \
const __m128i *__m128p = (const __m128i *) &P[0]; \
xmm0 = _mm_load_si128(&__m128p[0]); \
xmm1 = _mm_load_si128(&__m128p[1]); \
xmm2 = _mm_load_si128(&__m128p[2]); \
xmm3 = _mm_load_si128(&__m128p[3]); \
}
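
/* Store four XMM registers back to the 16-byte-aligned buffer P. */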
#define UNLOAD(P, xmm0, xmm1, xmm2, xmm3) { \
__m128i *__m128p = (__m128i *) &P[0]; \
_mm_store_si128(&__m128p[0], xmm0); \
_mm_store_si128(&__m128p[1], xmm1); \
_mm_store_si128(&__m128p[2], xmm2); \
_mm_store_si128(&__m128p[3], xmm3); \
}
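
/* X transformation, register form: XOR one 512-bit value into another. */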
#define X128R(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7) { \
xmm0 = _mm_xor_si128(xmm0, xmm4); \
xmm1 = _mm_xor_si128(xmm1, xmm5); \
xmm2 = _mm_xor_si128(xmm2, xmm6); \
xmm3 = _mm_xor_si128(xmm3, xmm7); \
}
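
/* X transformation, memory form: XOR the 512-bit value at P into the registers. */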
#define X128M(P, xmm0, xmm1, xmm2, xmm3) { \
const __m128i *__m128p = (const __m128i *) &P[0]; \
xmm0 = _mm_xor_si128(xmm0, _mm_load_si128(&__m128p[0])); \
xmm1 = _mm_xor_si128(xmm1, _mm_load_si128(&__m128p[1])); \
xmm2 = _mm_xor_si128(xmm2, _mm_load_si128(&__m128p[2])); \
xmm3 = _mm_xor_si128(xmm3, _mm_load_si128(&__m128p[3])); \
}
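
/*
 * Helpers: XOR a 64-bit table entry into an MMX register, and extract one
 * byte of an XMM register for use as an Ax table index.
 */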
#define _mm_xor_64(mm0, mm1) _mm_xor_si64((mm0), _mm_cvtsi64_m64(mm1))
#define _mm_extract_char(src, ndx) ((unsigned char) _mm_extract_epi8((src), (ndx)))
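
/*
 * EXTRACT computes two 64-bit output words of the combined S+P+L
 * transformation via the precomputed Ax lookup tables: each selected byte
 * of the state indexes one table, and the eight entries per word are XORed
 * together. The 32-bit variant accumulates in MMX registers; callers that
 * later use x87 floating point must issue _mm_empty().
 */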
#define EXTRACT32(row, xmm0, xmm1, xmm2, xmm3, xmm4) { \
__m64 mm0, mm1; \
\
mm0 = _mm_cvtsi64_m64(Ax[0][_mm_extract_char(xmm0, row + 0)]); \
mm0 = _mm_xor_64(mm0, Ax[1][_mm_extract_char(xmm0, row + 8)]); \
mm0 = _mm_xor_64(mm0, Ax[2][_mm_extract_char(xmm1, row + 0)]); \
mm0 = _mm_xor_64(mm0, Ax[3][_mm_extract_char(xmm1, row + 8)]); \
mm0 = _mm_xor_64(mm0, Ax[4][_mm_extract_char(xmm2, row + 0)]); \
mm0 = _mm_xor_64(mm0, Ax[5][_mm_extract_char(xmm2, row + 8)]); \
mm0 = _mm_xor_64(mm0, Ax[6][_mm_extract_char(xmm3, row + 0)]); \
mm0 = _mm_xor_64(mm0, Ax[7][_mm_extract_char(xmm3, row + 8)]); \
\
mm1 = _mm_cvtsi64_m64(Ax[0][_mm_extract_char(xmm0, row + 1)]); \
mm1 = _mm_xor_64(mm1, Ax[1][_mm_extract_char(xmm0, row + 9)]); \
mm1 = _mm_xor_64(mm1, Ax[2][_mm_extract_char(xmm1, row + 1)]); \
mm1 = _mm_xor_64(mm1, Ax[3][_mm_extract_char(xmm1, row + 9)]); \
mm1 = _mm_xor_64(mm1, Ax[4][_mm_extract_char(xmm2, row + 1)]); \
mm1 = _mm_xor_64(mm1, Ax[5][_mm_extract_char(xmm2, row + 9)]); \
mm1 = _mm_xor_64(mm1, Ax[6][_mm_extract_char(xmm3, row + 1)]); \
mm1 = _mm_xor_64(mm1, Ax[7][_mm_extract_char(xmm3, row + 9)]); \
\
xmm4 = _mm_set_epi64(mm1, mm0); \
}
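
/*
 * 64-bit variant: accumulate the table lookups in general-purpose
 * registers and assemble the 128-bit result with _mm_insert_epi64.
 */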
#define EXTRACT64(row, xmm0, xmm1, xmm2, xmm3, xmm4) { \
register unsigned long long r0, r1; \
r0 = Ax[0][_mm_extract_char(xmm0, row + 0)]; \
r0 ^= Ax[1][_mm_extract_char(xmm0, row + 8)]; \
r0 ^= Ax[2][_mm_extract_char(xmm1, row + 0)]; \
r0 ^= Ax[3][_mm_extract_char(xmm1, row + 8)]; \
r0 ^= Ax[4][_mm_extract_char(xmm2, row + 0)]; \
r0 ^= Ax[5][_mm_extract_char(xmm2, row + 8)]; \
r0 ^= Ax[6][_mm_extract_char(xmm3, row + 0)]; \
r0 ^= Ax[7][_mm_extract_char(xmm3, row + 8)]; \
\
r1 = Ax[0][_mm_extract_char(xmm0, row + 1)]; \
r1 ^= Ax[1][_mm_extract_char(xmm0, row + 9)]; \
r1 ^= Ax[2][_mm_extract_char(xmm1, row + 1)]; \
r1 ^= Ax[3][_mm_extract_char(xmm1, row + 9)]; \
r1 ^= Ax[4][_mm_extract_char(xmm2, row + 1)]; \
r1 ^= Ax[5][_mm_extract_char(xmm2, row + 9)]; \
r1 ^= Ax[6][_mm_extract_char(xmm3, row + 1)]; \
r1 ^= Ax[7][_mm_extract_char(xmm3, row + 9)]; \
\
xmm4 = _mm_cvtsi64_si128((long long) r0); \
xmm4 = _mm_insert_epi64(xmm4, (long long) r1, 1); \
}
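
/* XLPS, memory form: XOR in a 512-bit value from memory, then apply the table-driven S+P+L transformation. */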
#define XLPS128M(P, xmm0, xmm1, xmm2, xmm3) { \
__m128i tmm0, tmm1, tmm2, tmm3; \
X128M(P, xmm0, xmm1, xmm2, xmm3); \
\
EXTRACT(0, xmm0, xmm1, xmm2, xmm3, tmm0); \
EXTRACT(2, xmm0, xmm1, xmm2, xmm3, tmm1); \
EXTRACT(4, xmm0, xmm1, xmm2, xmm3, tmm2); \
EXTRACT(6, xmm0, xmm1, xmm2, xmm3, tmm3); \
\
xmm0 = tmm0; \
xmm1 = tmm1; \
xmm2 = tmm2; \
xmm3 = tmm3; \
}
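
/* XLPS, register form: same as above, but both operands are already in registers. */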
#define XLPS128R(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7) { \
__m128i tmm0, tmm1, tmm2, tmm3; \
X128R(xmm4, xmm5, xmm6, xmm7, xmm0, xmm1, xmm2, xmm3); \
\
EXTRACT(0, xmm4, xmm5, xmm6, xmm7, tmm0); \
EXTRACT(2, xmm4, xmm5, xmm6, xmm7, tmm1); \
EXTRACT(4, xmm4, xmm5, xmm6, xmm7, tmm2); \
EXTRACT(6, xmm4, xmm5, xmm6, xmm7, tmm3); \
\
xmm4 = tmm0; \
xmm5 = tmm1; \
xmm6 = tmm2; \
xmm7 = tmm3; \
}
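
/*
 * One round of the compression function: update the round key with the
 * constant C[i] (memory form), then mix the updated key into the state
 * (register form). Both steps include the S+P+L transformation.
 */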
#define ROUND128(i, xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7) { \
XLPS128M((&C[i]), xmm0, xmm2, xmm4, xmm6); \
XLPS128R(xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7); \
}
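
/*
 * Usage sketch (illustrative only; the names h, N and i are hypothetical):
 *
 *     LOAD(h, xmm0, xmm2, xmm4, xmm6);    load a 512-bit value
 *     X128M(N, xmm0, xmm2, xmm4, xmm6);   XOR another one in
 *     ...
 *     ROUND128(i, xmm0, xmm2, xmm4, xmm6, xmm1, xmm3, xmm5, xmm7);
 *     ...
 *     UNLOAD(h, xmm0, xmm2, xmm4, xmm6);  store the result
 *
 * The actual call sequence lives in the library's core compression routine.
 */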
#endif /* GOST_3411_2012_CORE_SSE41_H_ */