// gcm_simd.cpp - written and placed in the public domain by
//                Jeffrey Walton, Uri Blumenthal and Marcel Raad.
//                Original x86 CLMUL by Wei Dai. ARM and POWER8
//                PMULL and VMULL by JW, UB and MR.
//
//    This source file uses intrinsics and built-ins to gain access to
//    CLMUL, ARMv8a PMULL and POWER8 VMULL instructions. A separate
//    source file is needed because additional CXXFLAGS are required to
//    enable the appropriate instruction sets in some build configurations.
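//
//    For example (a sketch only; the authoritative flags live in the
//    library's makefiles): GCC on x86 typically needs -mssse3 -mpclmul
//    for this file, and -march=armv8-a+crypto on ARMv8 to enable PMULL.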

#include "pch.h"
#include "config.h"
#include "misc.h"

#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

// C1189: error: This header is specific to ARM targets
#if (CRYPTOPP_ARM_NEON_AVAILABLE) && !defined(_M_ARM64)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
# include "arm_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// GCC cast warning
#define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
#define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)

// ************************* Feature Probes ************************* //

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif // CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
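
// The probes below verify that an instruction executes without
// faulting. Under GNU-style assembly the pattern is: install a SIGILL
// handler, setjmp(), try the instruction, and longjmp() back from the
// handler if the CPU raised SIGILL. MSVC builds use __try/__except.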

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif // ARM32 or ARM64

#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64_t wa1[]={0,W64LIT(0x9090909090909090)},
                       wb1[]={0,W64LIT(0xb0b0b0b0b0b0b0b0)};
        const uint64x2_p a1=VecLoad(wa1), b1=VecLoad(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint32x4_p a2=VecLoad(wa2), b2=VecLoad(wb2);

        const uint64x2_p r1 = VecPolyMultiply00LE(a1, b1);
        const uint64x2_p r2 = VecPolyMultiply11LE((uint64x2_p)a2, (uint64x2_p)b2);

        const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
                       wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};
        const uint64x2_p c1=VecLoad(wc1), c2=VecLoad(wc2);

        result = !!(VecEqual(r1, c1) && VecEqual(r2, c2));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif // PPC32 or PPC64

// *************************** ARM NEON *************************** //

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(b,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(c,GetAlignmentOf<uint64x2_t>()));
    *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE

#if CRYPTOPP_ARM_PMULL_AVAILABLE

// Swaps high and low 64-bit words
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}

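// GCM_Reduce_PMULL folds the 256-bit carry-less product (c0 * x^128 +
// c1 * x^64 + c2, split across three 128-bit registers) back into a
// 128-bit GHASH state. It is the NEON twin of GCM_Reduce_CLMUL below;
// see the comment there for the polynomial arithmetic.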
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}

uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}

void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}

size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));

                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));

                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

// ***************************** SSE ***************************** //

#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
// SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
// a source file with a SSE architecture switch. Also see GH #226 and GH #284.
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE

#if CRYPTOPP_CLMUL_AVAILABLE

#if 0
// preserved for testing
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}

__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif // Testing
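
// A hypothetical smoke test for the reference code above (the #if 0
// block must be enabled first; this is a sketch, not part of the
// library). gcm_gf_mult runs entirely in scalar code, so it can
// cross-check the CLMUL path on any machine:
//
//   byte a[16] = {/* any bytes */}, b[16] = {/* any bytes */}, c[16];
//   gcm_gf_mult(a, b, c);  // c = a*b in GF(2^128), GCM bit order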

// Swaps high and low 64-bit words
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}

// SunCC 5.11-5.15 compiler crash. Make the function inline
// and parameters non-const. Also see GH #188 and GH #224.
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    /*
    The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
    significant half of c0 as a polynomial, which, due to GCM's bit reflection, is in the
    rightmost bit positions and the lowest byte addresses.

    c1 ^= c0t * 0xc200000000000000
    c2t ^= c0t
    t = shift (c1t ^ c0b) left 1 bit
    c2 ^= t * 0xe100000000000000
    c2t ^= c1b
    shift c2 left 1 bit and xor in lowest bit of c1t
    */
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}

// SunCC 5.13-5.14 compiler crash. Don't make the function inline.
// This is in contrast to GCM_Reduce_CLMUL, which must be inline.
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}

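// The 32-byte table groups written below interleave two powers of the
// hash key: after all four stores, bytes [i..i+7] hold the low half of
// h, [i+8..i+15] the low half of h1, [i+16..i+23] the high half of h,
// and [i+24..i+31] the high half of h1 (the 8-byte store at i+8
// deliberately overwrites part of the preceding full store of h).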
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}

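// Processes up to eight 16-byte blocks per pass. Partial products are
// accumulated in c0/c1/c2 against successive powers of the hash key
// from mtable, and a single GCM_Reduce_CLMUL folds them into the
// running digest x, so the costly reduction runs once per pass rather
// than once per block.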
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 instruction, but only used with CLMUL
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif // CRYPTOPP_CLMUL_AVAILABLE
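
// A rough sketch of how gcm.cpp is expected to select these routines
// at run time (hedged; consult gcm.cpp and cpu.h for the actual
// dispatch):
//
//   if (HasCLMUL())      { /* CLMUL paths above */ }
//   else if (HasSSE2())  { /* GCM_Xor16_SSE2 and table-driven code */ }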

// ***************************** POWER8 ***************************** //

#if CRYPTOPP_POWER8_AVAILABLE
void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
{
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif // CRYPTOPP_POWER8_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

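// GCM_Reduce_VMULL and GCM_Multiply_VMULL are the POWER8 twins of the
// CLMUL routines above; see GCM_Reduce_CLMUL for the polynomial
// arithmetic behind the reduction.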
uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c1 = VecXor(c1, VecPolyMultiply10LE(c0, r));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c0 = VecPolyMultiply00LE(vec_sl(c0, m1), r);
    c2 = VecXor(c2, c0);
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VecXor(c2, c1);
}

inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VecPolyMultiply00LE(x, h);
    const uint64x2_p c1 = VecXor(VecPolyMultiply01LE(x, h), VecPolyMultiply10LE(x, h));
    const uint64x2_p c2 = VecPolyMultiply11LE(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}

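// LoadHashKey, LoadBuffer1 and LoadBuffer2 are endian shims: the
// permute masks below either reverse all 16 bytes (little-endian) or
// swap the 64-bit halves (big-endian) so the loaded data matches the
// convention the VecPolyMultiply*LE routines expect.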
inline uint64x2_p LoadHashKey(const byte *hashKey)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    return VecPermute(key, key, mask);
#else
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return VecPermute(key, key, mask);
#endif
}

void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h = LoadHashKey(hashKey), h0 = h;

    unsigned int i;
    uint64_t temp[2];

    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        VecStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VecStore(h1, mulTable+i+16);
        VecStore(h, mulTable+i+8);
        VecStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    VecStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VecStore(h1, mulTable+i+16);
    VecStore(h, mulTable+i+8);
    VecStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}

// Swaps high and low 64-bit words
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}

inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)VecLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return VecPermute(data, data, mask);
#endif
}

inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
#else
    return (uint64x2_p)VecLoadBE(dataBuffer);
#endif
}

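// The POWER8 twin of GCM_AuthenticateBlocks_CLMUL: up to eight blocks
// per pass, with a single reduction per pass.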
size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = (uint64x2_p)VecXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecPolyMultiply00LE(d1, h0));
                c2 = VecXor(c2, VecPolyMultiply01LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecPolyMultiply00LE(d1, h2));
                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VecXor(c0, VecPolyMultiply01LE(d2, h0));
            c2 = VecXor(c2, VecPolyMultiply01LE(d1, h1));
            d2 = VecXor(d2, d1);
            c1 = VecXor(c1, VecPolyMultiply01LE(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecPolyMultiply10LE(d1, h0));
                c2 = VecXor(c2, VecPolyMultiply11LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecPolyMultiply10LE(d1, h2));
                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VecXor(c0, VecPolyMultiply10LE(d1, h0));
            c2 = VecXor(c2, VecPolyMultiply10LE(d2, h1));
            d1 = VecXor(d1, d2);
            c1 = VecXor(c1, VecPolyMultiply10LE(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VecXor(VecXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VecStore(x, hbuffer);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VecStore(VecPermute(VecLoad(hashBuffer), mask), hashBuffer);
}
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END