#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Cast helpers for the SSE2/CLMUL code paths
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// Cast helpers for the NEON/PMULL code paths
#define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
#define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;
ANONYMOUS_NAMESPACE_BEGIN
#if CRYPTOPP_ARM_PMULL_AVAILABLE

#if defined(__GNUC__)
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}

inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (vget_high_u64(b)) );
    return r;
}

inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (vget_high_u64(a)), "w" (b) );
    return r;
}

inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull2 %0.1q, %1.2d, %2.2d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}

inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (c) );
    return r;
}

template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (C) );
    return r;
}
#endif // GCC and compatibles

#if defined(_MSC_VER)
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}

inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}

inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}

inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}

inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    return (uint64x2_t)vextq_u8(
        vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), c);
}

template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    return (uint64x2_t)vextq_u8(
        vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), C);
}
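// MSVC has no GNU-style inline assembly, so the same helpers are written with
// intrinsics: vmull_p64 multiplies two scalar 64-bit polynomials, so the
// requested lanes are first extracted with vgetq_lane_u64. The vreinterpretq
// casts only change the type view of a register, not its bits.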
#endif // Microsoft and compatibles
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

inline uint64x2_p VMULL2LE(const uint64x2_p& val)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return VecRotateLeftOctet<8>(val);
#else
    return val;
#endif
}

// _mm_clmulepi64_si128(a, b, 0x00)
inline uint64x2_p VMULL_00LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return VMULL2LE(__vpmsumd (VecGetHigh(a), VecGetHigh(b)));
#elif defined(__clang__)
    return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetHigh(a), VecGetHigh(b)));
#else
    return VMULL2LE(__builtin_crypto_vpmsumd (VecGetHigh(a), VecGetHigh(b)));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x01)
inline uint64x2_p VMULL_01LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return VMULL2LE(__vpmsumd (a, VecGetHigh(b)));
#elif defined(__clang__)
    return VMULL2LE(__builtin_altivec_crypto_vpmsumd (a, VecGetHigh(b)));
#else
    return VMULL2LE(__builtin_crypto_vpmsumd (a, VecGetHigh(b)));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x10)
inline uint64x2_p VMULL_10LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return VMULL2LE(__vpmsumd (VecGetHigh(a), b));
#elif defined(__clang__)
    return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetHigh(a), b));
#else
    return VMULL2LE(__builtin_crypto_vpmsumd (VecGetHigh(a), b));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x11)
inline uint64x2_p VMULL_11LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return VMULL2LE(__vpmsumd (VecGetLow(a), b));
#elif defined(__clang__)
    return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetLow(a), b));
#else
    return VMULL2LE(__builtin_crypto_vpmsumd (VecGetLow(a), b));
#endif
}
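// The helpers above are built on vpmsumd, which XORs two 64x64 carry-less
// products: vpmsumd(a,b) = a[0]*b[0] ^ a[1]*b[1]. Passing an operand through
// VecGetLow()/VecGetHigh() leaves only one doubleword live, which effectively
// isolates the single product each helper's name promises. VMULL2LE() adjusts
// the doubleword order on big-endian builds so every target sees the same
// result layout.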
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE

ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY

typedef void (*SigHandler)(int);

static jmp_buf s_jmpSIGILL;
static void SigIllHandler(int)
{
    longjmp(s_jmpSIGILL, 1);
}
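// CPU_ProbePMULL() below tests for the carry-less multiply instructions by
// actually executing them. On GNU-style toolchains a SIGILL handler plus
// setjmp/longjmp catches the fault on hardware without the extension; under
// MS-style structured exception handling the same job is done with
// __try/__except. The computed products are compared against known answers,
// so a probe only reports success when the instructions both execute and
// produce correct results.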
#endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
        const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const poly128_t r1 = pmull_p64(a1, b1);
        const poly128_t r2 = pmull_high_p64((poly64x2_t)(a2), (poly64x2_t)(b2));

        const uint64x2_t t1 = (uint64x2_t)(r1);
        const uint64x2_t t2 = (uint64x2_t)(r2);

        result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64x2_t a1={0,0x9090909090909090}, b1={0,0xb0b0b0b0b0b0b0b0};
        const uint8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11((uint64x2_t)a2, (uint64x2_t)b2);

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif // ARM32 or ARM64

#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
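// Same probing scheme as the ARM version above: run the vpmsumd-backed
// helpers on fixed test vectors under the SIGILL guard and report whether
// they executed and produced sensible results.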
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint8x16_p a={0x0f,0x08,0x08,0x08, 0x80,0x80,0x80,0x80,
                            0x00,0x0a,0x0a,0x0a, 0xa0,0xa0,0xa0,0xa0},
                         b={0x0f,0xc0,0xc0,0xc0, 0x0c,0x0c,0x0c,0x0c,
                            0x00,0xe0,0xe0,0xe0, 0x0e,0x0e,0x0e,0x0e};

        const uint64x2_p r1 = VMULL_00LE((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r2 = VMULL_01LE((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r3 = VMULL_10LE((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r4 = VMULL_11LE((uint64x2_p)(a), (uint64x2_p)(b));

        result = VecNotEqual(r1, r2) && VecNotEqual(r3, r4);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif // PPC32 or PPC64

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE

#if CRYPTOPP_ARM_PMULL_AVAILABLE

inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}
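// GCM_Reduce_PMULL folds a 256-bit carry-less product c0*x^128 + c1*x^64 + c2
// back to 128 bits modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
// Because GHASH works in a bit-reflected representation, the reduction is a
// sequence of shifts, XOR folds and carry-less multiplies by 'r'; the
// constants 0xe100000000000000 and 0xc200000000000000 held in 'r' are the
// reduction polynomial expressed in the two bit orderings the folding steps use.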
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}
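// GCM_Multiply_PMULL is a schoolbook 128x128 -> 256-bit carry-less multiply:
//   c0 = x_lo * h_lo                  (low 128 bits)
//   c2 = x_hi * h_hi                  (high 128 bits)
//   c1 = x_hi * h_lo ^ x_lo * h_hi    (middle terms)
// followed by the modular reduction above.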
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}
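// The key table is filled 32 bytes at a time with two consecutive powers of
// the hash key H. Once the overlapping stores above resolve, each 32-byte
// slot holds the low halves of both powers in its first 16 bytes and the high
// halves in the second 16 bytes, which is exactly the h0/h1 split that
// GCM_AuthenticateBlocks_PMULL() loads below.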
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));
                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));
                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
    return len;
}
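// GHASH is specified on big-endian byte ordering. The helper below byte-swaps
// the 16-byte hash state on little-endian targets so the caller always sees
// the buffer in the order the specification expects.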
void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif // CRYPTOPP_SSE2_ASM_AVAILABLE

#if CRYPTOPP_CLMUL_AVAILABLE
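// The two functions below are portable reference implementations: a
// bit-at-a-time GF(2^128) multiply and a PolynomialMod2-based emulation of
// _mm_clmulepi64_si128. They appear to be kept only for testing the
// intrinsic-based code paths and are normally compiled out.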
#if 0
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}

__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif // Testing
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}
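// GCM_Reduce_CLMUL performs the same 256-bit to 128-bit reduction as
// GCM_Reduce_PMULL above, expressed with SSE2/CLMUL intrinsics; 'r' packs the
// 0xe1... and 0xc2... reduction constants into a single register.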
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}
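// GCM_AuthenticateBlocks_CLMUL hashes the data a small group of blocks at a
// time: each pass of the outer loop accumulates carry-less partial products of
// the blocks against successive powers of H taken from mtable (into c0/c1/c2),
// and only then calls GCM_Reduce_CLMUL, so the expensive reduction runs once
// per group instead of once per block.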
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif // CRYPTOPP_CLMUL_AVAILABLE

#if CRYPTOPP_POWER7_AVAILABLE
void GCM_Xor16_POWER7(byte *a, const byte *b, const byte *c)
{
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif // CRYPTOPP_POWER7_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c1 = VecXor(c1, VMULL_10LE(c0, r));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c0 = VMULL_00LE(vec_sl(c0, m1), r);
    c2 = VecXor(c2, c0);
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VecXor(c2, c1);
}

uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VMULL_00LE(x, h);
    const uint64x2_p c1 = VecXor(VMULL_01LE(x, h), VMULL_10LE(x, h));
    const uint64x2_p c2 = VMULL_11LE(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}
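// The POWER8 path mirrors the CLMUL/PMULL code: GCM_Multiply_VMULL forms the
// same three partial products with the vpmsumd helpers and reuses
// GCM_Reduce_VMULL for the modular reduction. The Load* helpers below handle
// the byte and word ordering differences between big- and little-endian
// targets.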
inline uint64x2_p LoadHashKey(const byte *hashKey)
{
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
#else
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
#endif
    return (uint64x2_p)VecPermute(key, mask);
}

void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h0 = LoadHashKey(hashKey), h = h0;
    uint64_t temp[2];

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        VecStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VecStore(h1, mulTable+i+16);
        VecStore(h, mulTable+i+8);
        VecStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    VecStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VecStore(h1, mulTable+i+16);
    VecStore(h, mulTable+i+8);
    VecStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}

inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)VecLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return (uint64x2_p)VecPermute(data, mask);
#endif
}
inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
#else
    return (uint64x2_p)VecLoadBE(dataBuffer);
#endif
}

size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = VecXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VMULL_00LE(d1, h0));
                c2 = VecXor(c2, VMULL_01LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VMULL_00LE(d1, h2));
                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VecXor(c0, VMULL_01LE(d2, h0));
            c2 = VecXor(c2, VMULL_01LE(d1, h1));
            d2 = VecXor(d2, d1);
            c1 = VecXor(c1, VMULL_01LE(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VMULL_10LE(d1, h0));
                c2 = VecXor(c2, VMULL_11LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VMULL_10LE(d1, h2));
                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VecXor(c0, VMULL_10LE(d1, h0));
            c2 = VecXor(c2, VMULL_10LE(d2, h1));
            d1 = VecXor(d1, d2);
            c1 = VecXor(c1, VMULL_10LE(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VecXor(VecXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VecStore(x, hbuffer);
    return len;
}
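// As with the other back ends, the hash buffer is kept internally in the
// order the SIMD code prefers; GCM_ReverseHashBufferIfNeeded_VMULL applies a
// 16-byte permute so the final state is handed back in the byte order the
// GCM specification expects.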
void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VecStore(VecPermute(VecLoad(hashBuffer), (uint8x16_p)mask), hashBuffer);
}
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END