Crypto++  8.0
Free C++ class library of cryptographic schemes
gcm_simd.cpp
1 // gcm_simd.cpp - written and placed in the public domain by
2 // Jeffrey Walton, Uri Blumenthal and Marcel Raad.
3 // Original x86 CLMUL by Wei Dai. ARM and POWER8
4 // PMULL and VMULL by JW, UB and MR.
5 //
6 // This source file uses intrinsics and built-ins to gain access to CLMUL,
7 // ARMv8a PMULL and Power8 VMULL instructions. A separate source file is
8 // needed because additional CXXFLAGS are required to enable the
9 // appropriate instruction sets in some build configurations.
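//
// As an illustration only (these flags are not taken from this file and may
// differ by toolchain): the per-file options are typically along the lines of
// "-mssse3 -mpclmul" for the x86 CLMUL path, "-march=armv8-a+crypto" for the
// ARMv8 PMULL path, and "-mcpu=power8" (or "-qarch=pwr8" with IBM XL C/C++)
// for the POWER8 VMULL path.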
10 
11 #include "pch.h"
12 #include "config.h"
13 #include "misc.h"
14 
15 #if defined(CRYPTOPP_DISABLE_GCM_ASM)
16 # undef CRYPTOPP_X86_ASM_AVAILABLE
17 # undef CRYPTOPP_X32_ASM_AVAILABLE
18 # undef CRYPTOPP_X64_ASM_AVAILABLE
19 # undef CRYPTOPP_SSE2_ASM_AVAILABLE
20 #endif
21 
22 #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
23 # include <emmintrin.h>
24 # include <xmmintrin.h>
25 #endif
26 
27 #if (CRYPTOPP_CLMUL_AVAILABLE)
28 # include <tmmintrin.h>
29 # include <wmmintrin.h>
30 #endif
31 
32 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
33 # include <arm_neon.h>
34 #endif
35 
36 #if (CRYPTOPP_ARM_ACLE_AVAILABLE)
37 # include <stdint.h>
38 # include <arm_acle.h>
39 #endif
40 
41 #if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
42 # include "ppc_simd.h"
43 #endif
44 
45 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
46 # include <signal.h>
47 # include <setjmp.h>
48 #endif
49 
50 #ifndef EXCEPTION_EXECUTE_HANDLER
51 # define EXCEPTION_EXECUTE_HANDLER 1
52 #endif
53 
54 // Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
55 #define M128_CAST(x) ((__m128i *)(void *)(x))
56 #define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
57 
58 // GCC cast warning
59 #define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
60 #define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))
61 
62 // Squash MS LNK4221 and libtool warnings
63 extern const char GCM_SIMD_FNAME[] = __FILE__;
64 
65 ANONYMOUS_NAMESPACE_BEGIN
66 
67 // *************************** ARM NEON *************************** //
68 
69 #if CRYPTOPP_ARM_PMULL_AVAILABLE
70 #if defined(__GNUC__)
71 // Schneiders, Hovsmith and O'Rourke used this trick.
72 // It results in much better code generation in production code
73 // by avoiding D-register spills when using vgetq_lane_u64. The
74 // problem does not surface under minimal test cases.
75 inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
76 {
77  uint64x2_t r;
78  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
79  :"=w" (r) : "w" (a), "w" (b) );
80  return r;
81 }
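// For reference, a purely intrinsic equivalent of PMULL_00 above (a sketch
// assuming ACLE vmull_p64 support; the name PMULL_00_intrin is hypothetical
// and not part of this file, and the MSVC definitions further below take the
// same approach) would look roughly like:
//
//   inline uint64x2_t PMULL_00_intrin(const uint64x2_t a, const uint64x2_t b)
//   {
//       return (uint64x2_t)vmull_p64(vgetq_lane_u64(a, 0), vgetq_lane_u64(b, 0));
//   }
//
// The inline asm form is used instead because this intrinsic form is what
// tends to trigger the D-register spills mentioned above.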
82 
83 inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
84 {
85  uint64x2_t r;
86  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
87  :"=w" (r) : "w" (a), "w" (vget_high_u64(b)) );
88  return r;
89 }
90 
91 inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
92 {
93  uint64x2_t r;
94  __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
95  :"=w" (r) : "w" (vget_high_u64(a)), "w" (b) );
96  return r;
97 }
98 
99 inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
100 {
101  uint64x2_t r;
102  __asm __volatile("pmull2 %0.1q, %1.2d, %2.2d \n\t"
103  :"=w" (r) : "w" (a), "w" (b) );
104  return r;
105 }
106 
107 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
108 {
109  uint64x2_t r;
110  __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
111  :"=w" (r) : "w" (a), "w" (b), "I" (c) );
112  return r;
113 }
114 
115 // https://github.com/weidai11/cryptopp/issues/366
116 template <unsigned int C>
117 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
118 {
119  uint64x2_t r;
120  __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
121  :"=w" (r) : "w" (a), "w" (b), "I" (C) );
122  return r;
123 }
124 #endif // GCC and compatibles
125 
126 #if defined(_MSC_VER)
127 inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
128 {
129  return (uint64x2_t)(vmull_p64(
130  vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
131  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
132 }
133 
134 inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
135 {
136  return (uint64x2_t)(vmull_p64(
137  vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
138  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
139 }
140 
141 inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
142 {
143  return (uint64x2_t)(vmull_p64(
144  vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
145  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
146 }
147 
148 inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
149 {
150  return (uint64x2_t)(vmull_p64(
151  vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
152  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
153 }
154 
155 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
156 {
157  return (uint64x2_t)vextq_u8(
158  vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), c);
159 }
160 
161 // https://github.com/weidai11/cryptopp/issues/366
162 template <unsigned int C>
163 inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
164 {
165  return (uint64x2_t)vextq_u8(
166  vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), C);
167 }
168 #endif // Microsoft and compatibles
169 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
170 
171 // ************************** Power 8 Crypto ************************** //
172 
173 #if CRYPTOPP_POWER8_VMULL_AVAILABLE
174 
175 using CryptoPP::uint32x4_p;
176 using CryptoPP::uint64x2_p;
177 using CryptoPP::VecGetLow;
178 using CryptoPP::VecGetHigh;
179 using CryptoPP::VecRotateLeftOctet;
180 
181 // POWER8 GCM mode is confusing. The algorithm is reflected so
182 // nearly everything we do is reversed for a little-endian system,
183 // including on big-endian machines. VMULL2LE swaps dwords for a
184 // little endian machine; VMULL_00LE, VMULL_01LE, VMULL_10LE and
185 // VMULL_11LE are backwards and (1) read low words with
186 // VecGetHigh, (2) read high words with VecGetLow, and
187 // (3) yield a product that is endian swapped. These steps ensure
188 // GCM parameters are presented in the correct order for the
189 // algorithm on both big and little-endian systems, but it is
190 // awful to try to follow the logic because it is so backwards.
191 // Because functions like VMULL_NN are so backwards we can't put
192 // them in ppc_simd.h. They simply don't work the way a typical
193 // user expects them to work.
194 
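// As a reference for the helpers below, the x86 intrinsic they mirror,
// _mm_clmulepi64_si128(a, b, imm), carrylessly multiplies one 64-bit half of
// each operand selected by the immediate:
//
//   0x00 -> a[63:0]   * b[63:0]
//   0x01 -> a[127:64] * b[63:0]
//   0x10 -> a[63:0]   * b[127:64]
//   0x11 -> a[127:64] * b[127:64]
//
// VMULL_00LE through VMULL_11LE produce the same four products with vpmsumd,
// with operands fetched in the reversed manner described above.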
195 inline uint64x2_p VMULL2LE(const uint64x2_p& val)
196 {
197 #if (CRYPTOPP_BIG_ENDIAN)
198  return VecRotateLeftOctet<8>(val);
199 #else
200  return val;
201 #endif
202 }
203 
204 // _mm_clmulepi64_si128(a, b, 0x00)
205 inline uint64x2_p VMULL_00LE(const uint64x2_p& a, const uint64x2_p& b)
206 {
207 #if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
208  return VMULL2LE(__vpmsumd (VecGetHigh(a), VecGetHigh(b)));
209 #elif defined(__clang__)
210  return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetHigh(a), VecGetHigh(b)));
211 #else
212  return VMULL2LE(__builtin_crypto_vpmsumd (VecGetHigh(a), VecGetHigh(b)));
213 #endif
214 }
215 
216 // _mm_clmulepi64_si128(a, b, 0x01)
217 inline uint64x2_p VMULL_01LE(const uint64x2_p& a, const uint64x2_p& b)
218 {
219  // Small speedup. VecGetHigh(b) ensures the high dword of 'b' is 0.
220  // The 0 used in the vmull yields 0 for the high product, so the high
221  // dword of 'a' is "don't care".
222 #if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
223  return VMULL2LE(__vpmsumd (a, VecGetHigh(b)));
224 #elif defined(__clang__)
225  return VMULL2LE(__builtin_altivec_crypto_vpmsumd (a, VecGetHigh(b)));
226 #else
227  return VMULL2LE(__builtin_crypto_vpmsumd (a, VecGetHigh(b)));
228 #endif
229 }
230 
231 // _mm_clmulepi64_si128(a, b, 0x10)
232 inline uint64x2_p VMULL_10LE(const uint64x2_p& a, const uint64x2_p& b)
233 {
234  // Small speedup. VecGetHigh(a) ensures the high dword of 'a' is 0.
235  // The 0 used in the vmull yields 0 for the high product, so the high
236  // dword of 'b' is "don't care".
237 #if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
238  return VMULL2LE(__vpmsumd (VecGetHigh(a), b));
239 #elif defined(__clang__)
240  return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetHigh(a), b));
241 #else
242  return VMULL2LE(__builtin_crypto_vpmsumd (VecGetHigh(a), b));
243 #endif
244 }
245 
246 // _mm_clmulepi64_si128(a, b, 0x11)
247 inline uint64x2_p VMULL_11LE(const uint64x2_p& a, const uint64x2_p& b)
248 {
249  // Small speedup. VecGetLow(a) ensures the high dword of 'a' is 0.
250  // The 0 used in the vmull yields 0 for the high product, so the high
251  // dword of 'b' is "don't care".
252 #if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
253  return VMULL2LE(__vpmsumd (VecGetLow(a), b));
254 #elif defined(__clang__)
255  return VMULL2LE(__builtin_altivec_crypto_vpmsumd (VecGetLow(a), b));
256 #else
257  return VMULL2LE(__builtin_crypto_vpmsumd (VecGetLow(a), b));
258 #endif
259 }
260 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
261 
262 ANONYMOUS_NAMESPACE_END
263 
264 NAMESPACE_BEGIN(CryptoPP)
265 
266 // ************************* Feature Probes ************************* //
267 
268 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
269 extern "C" {
270  typedef void (*SigHandler)(int);
271 
272  static jmp_buf s_jmpSIGILL;
273  static void SigIllHandler(int)
274  {
275  longjmp(s_jmpSIGILL, 1);
276  }
277 }
278 #endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
279 
280 #if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
281 bool CPU_ProbePMULL()
282 {
283 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
284  return false;
285 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
286 # if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
287  volatile bool result = true;
288  __try
289  {
290  const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
291  const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
292  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
293  b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
294  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
295 
296  const poly128_t r1 = pmull_p64(a1, b1);
297  const poly128_t r2 = pmull_high_p64((poly64x2_t)(a2), (poly64x2_t)(b2));
298 
299  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
300  const uint64x2_t t1 = (uint64x2_t)(r1); // {bignum,bignum}
301  const uint64x2_t t2 = (uint64x2_t)(r2); // {bignum,bignum}
302 
303  result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 &&
304  vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
305  vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 &&
306  vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
307  }
308  __except (EXCEPTION_EXECUTE_HANDLER)
309  {
310  return false;
311  }
312  return result;
313 # else
314 
315  // longjmp and clobber warnings. Volatile is required.
316  volatile bool result = true;
317 
318  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
319  if (oldHandler == SIG_ERR)
320  return false;
321 
322  volatile sigset_t oldMask;
323  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
324  return false;
325 
326  if (setjmp(s_jmpSIGILL))
327  result = false;
328  else
329  {
330  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
331  const uint64x2_t a1={0,0x9090909090909090}, b1={0,0xb0b0b0b0b0b0b0b0};
332  const uint8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
333  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
334  b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
335  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
336 
337  const uint64x2_t r1 = PMULL_00(a1, b1);
338  const uint64x2_t r2 = PMULL_11((uint64x2_t)a2, (uint64x2_t)b2);
339 
340  result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
341  vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
342  vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
343  vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
344  }
345 
346  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
347  signal(SIGILL, oldHandler);
348  return result;
349 # endif
350 #else
351  return false;
352 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
353 }
354 #endif // ARM32 or ARM64
355 
356 #if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
357 bool CPU_ProbePMULL()
358 {
359 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
360  return false;
361 #elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
362  // longjmp and clobber warnings. Volatile is required.
363  volatile bool result = true;
364 
365  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
366  if (oldHandler == SIG_ERR)
367  return false;
368 
369  volatile sigset_t oldMask;
370  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
371  return false;
372 
373  if (setjmp(s_jmpSIGILL))
374  result = false;
375  else
376  {
377  const uint8x16_p a={0x0f,0x08,0x08,0x08, 0x80,0x80,0x80,0x80,
378  0x00,0x0a,0x0a,0x0a, 0xa0,0xa0,0xa0,0xa0},
379  b={0x0f,0xc0,0xc0,0xc0, 0x0c,0x0c,0x0c,0x0c,
380  0x00,0xe0,0xe0,0xe0, 0x0e,0x0e,0x0e,0x0e};
381 
382  const uint64x2_p r1 = VMULL_00LE((uint64x2_p)(a), (uint64x2_p)(b));
383  const uint64x2_p r2 = VMULL_01LE((uint64x2_p)(a), (uint64x2_p)(b));
384  const uint64x2_p r3 = VMULL_10LE((uint64x2_p)(a), (uint64x2_p)(b));
385  const uint64x2_p r4 = VMULL_11LE((uint64x2_p)(a), (uint64x2_p)(b));
386 
387  result = VecNotEqual(r1, r2) && VecNotEqual(r3, r4);
388  }
389 
390  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
391  signal(SIGILL, oldHandler);
392  return result;
393 #else
394  return false;
395 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
396 }
397 #endif // PPC32 or PPC64
398 
399 // *************************** ARM NEON *************************** //
400 
401 #if CRYPTOPP_ARM_NEON_AVAILABLE
402 void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
403 {
404  CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf<uint64x2_t>()));
405  CRYPTOPP_ASSERT(IsAlignedOn(b,GetAlignmentOf<uint64x2_t>()));
406  CRYPTOPP_ASSERT(IsAlignedOn(c,GetAlignmentOf<uint64x2_t>()));
407  *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
408 }
409 #endif // CRYPTOPP_ARM_NEON_AVAILABLE
410 
411 #if CRYPTOPP_ARM_PMULL_AVAILABLE
412 
413 // Swaps high and low 64-bit words
414 inline uint64x2_t SwapWords(const uint64x2_t& data)
415 {
416  return (uint64x2_t)vcombine_u64(
417  vget_high_u64(data), vget_low_u64(data));
418 }
419 
420 uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
421 {
422  c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
423  c1 = veorq_u64(c1, PMULL_01(c0, r));
424  c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
425  c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
426  c0 = PMULL_00(c0, r);
427  c2 = veorq_u64(c2, c0);
428  c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
429  c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
430  c2 = vshlq_n_u64(c2, 1);
431 
432  return veorq_u64(c2, c1);
433 }
434 
435 uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
436 {
437  const uint64x2_t c0 = PMULL_00(x, h);
438  const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
439  const uint64x2_t c2 = PMULL_11(x, h);
440 
441  return GCM_Reduce_PMULL(c0, c1, c2, r);
442 }
443 
444 void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
445 {
446  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
447  const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
448  const uint64x2_t h0 = vextq_u64(t, t, 1);
449 
450  uint64x2_t h = h0;
451  unsigned int i;
452  for (i=0; i<tableSize-32; i+=32)
453  {
454  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
455  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
456  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
457  vst1q_u64((uint64_t *)(mulTable+i+8), h);
458  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
459  h = GCM_Multiply_PMULL(h1, h0, r);
460  }
461 
462  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
463  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
464  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
465  vst1q_u64((uint64_t *)(mulTable+i+8), h);
466  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
467 }
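// Note on the four overlapping stores above: after they complete, each
// 32-byte group of mulTable appears to hold { low half of h, low half of h1,
// high half of h, high half of h1 }, i.e. two successive powers of the hash
// key split so that GCM_AuthenticateBlocks_PMULL can fetch both low halves
// and both high halves with single 16-byte loads.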
468 
469 size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
470 {
471  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
472  uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));
473 
474  while (len >= 16)
475  {
476  size_t i=0, s = UnsignedMin(len/16U, 8U);
477  uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
478  uint64x2_t c0 = vdupq_n_u64(0);
479  uint64x2_t c1 = vdupq_n_u64(0);
480  uint64x2_t c2 = vdupq_n_u64(0);
481 
482  while (true)
483  {
484  const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
485  const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
486  const uint64x2_t h2 = veorq_u64(h0, h1);
487 
488  if (++i == s)
489  {
490  const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
491  d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
492  c0 = veorq_u64(c0, PMULL_00(d1, h0));
493  c2 = veorq_u64(c2, PMULL_10(d1, h1));
494  d1 = veorq_u64(d1, SwapWords(d1));
495  c1 = veorq_u64(c1, PMULL_00(d1, h2));
496 
497  break;
498  }
499 
500  d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
501  c0 = veorq_u64(c0, PMULL_10(d2, h0));
502  c2 = veorq_u64(c2, PMULL_10(d1, h1));
503  d2 = veorq_u64(d2, d1);
504  c1 = veorq_u64(c1, PMULL_10(d2, h2));
505 
506  if (++i == s)
507  {
508  const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
509  d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
510  c0 = veorq_u64(c0, PMULL_01(d1, h0));
511  c2 = veorq_u64(c2, PMULL_11(d1, h1));
512  d1 = veorq_u64(d1, SwapWords(d1));
513  c1 = veorq_u64(c1, PMULL_01(d1, h2));
514 
515  break;
516  }
517 
518  const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
519  d2 = vextq_u64(t3, t3, 1);
520  c0 = veorq_u64(c0, PMULL_01(d1, h0));
521  c2 = veorq_u64(c2, PMULL_01(d2, h1));
522  d1 = veorq_u64(d1, d2);
523  c1 = veorq_u64(c1, PMULL_01(d1, h2));
524  }
525  data += s*16;
526  len -= s*16;
527 
528  c1 = veorq_u64(veorq_u64(c1, c0), c2);
529  x = GCM_Reduce_PMULL(c0, c1, c2, r);
530  }
531 
532  vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
533  return len;
534 }
535 
536 void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
537 {
538  if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
539  {
540  const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
541  vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
542  }
543 }
544 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
545 
546 // ***************************** SSE ***************************** //
547 
548 #if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
549 // SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
550 // a source file with a SSE architecture switch. Also see GH #226 and GH #284.
551 void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
552 {
553 # if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
554  asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
555  : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
556 # else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
557  _mm_store_si128(M128_CAST(a), _mm_xor_si128(
558  _mm_load_si128(CONST_M128_CAST(b)),
559  _mm_load_si128(CONST_M128_CAST(c))));
560 # endif
561 }
562 #endif // CRYPTOPP_SSE2_ASM_AVAILABLE
563 
564 #if CRYPTOPP_CLMUL_AVAILABLE
565 
566 #if 0
567 // preserved for testing
568 void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
569 {
570  word64 Z0=0, Z1=0, V0, V1;
571 
572  typedef BlockGetAndPut<word64, BigEndian> Block;
573  Block::Get(a)(V0)(V1);
574 
575  for (int i=0; i<16; i++)
576  {
577  for (int j=0x80; j!=0; j>>=1)
578  {
579  int x = b[i] & j;
580  Z0 ^= x ? V0 : 0;
581  Z1 ^= x ? V1 : 0;
582  x = (int)V1 & 1;
583  V1 = (V1>>1) | (V0<<63);
584  V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
585  }
586  }
587  Block::Put(NULLPTR, c)(Z0)(Z1);
588 }
589 
590 __m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
591 {
592  word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
593  word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};
594 
595  PolynomialMod2 pa((byte *)A, 8);
596  PolynomialMod2 pb((byte *)B, 8);
597  PolynomialMod2 c = pa*pb;
598 
599  __m128i output;
600  for (int i=0; i<16; i++)
601  ((byte *)&output)[i] = c.GetByte(i);
602  return output;
603 }
604 #endif // Testing
605 
606 // Swaps high and low 64-bit words
607 inline __m128i SwapWords(const __m128i& val)
608 {
609  return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
610 }
611 
612 // SunCC 5.11-5.15 compiler crash. Make the function inline
613 // and parameters non-const. Also see GH #188 and GH #224.
614 inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
615 {
616  /*
617  The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
618  significant half of c0 as a polynomial which, due to GCM's bit reflection, occupies the
619  rightmost bit positions and the lowest byte addresses.
620 
621  c1 ^= c0t * 0xc200000000000000
622  c2t ^= c0t
623  t = shift (c1t ^ c0b) left 1 bit
624  c2 ^= t * 0xe100000000000000
625  c2t ^= c1b
626  shift c2 left 1 bit and xor in lowest bit of c1t
627  */
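 // Background: GHASH works in GF(2^128) defined by the polynomial
 // x^128 + x^7 + x^2 + x + 1, with GCM's reflected bit order. 0xe1 is the
 // bit-reflection of 0x87, the byte encoding x^7 + x^2 + x + 1, and the two
 // 64-bit halves of 'r' (0xe1... and 0xc2...) are the two precomputed
 // variants of that reduction constant consumed by the folding multiplies
 // below.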
628  c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
629  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
630  c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
631  c0 = _mm_slli_epi64(c0, 1);
632  c0 = _mm_clmulepi64_si128(c0, r, 0);
633  c2 = _mm_xor_si128(c2, c0);
634  c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
635  c1 = _mm_unpacklo_epi64(c1, c2);
636  c1 = _mm_srli_epi64(c1, 63);
637  c2 = _mm_slli_epi64(c2, 1);
638  return _mm_xor_si128(c2, c1);
639 }
640 
641 // SunCC 5.13-5.14 compiler crash. Don't make the function inline.
642 // This is in contrast to GCM_Reduce_CLMUL, which must be inline.
643 __m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
644 {
645  const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
646  const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
647  const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);
648 
649  return GCM_Reduce_CLMUL(c0, c1, c2, r);
650 }
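// To show how the two routines above compose, a simplified GHASH loop over
// already byte-reflected 16-byte blocks might look like the sketch below
// (ghash_sketch is a hypothetical name, not a function in this file):
//
//   __m128i ghash_sketch(__m128i x, const __m128i h, const __m128i r,
//                        const __m128i *blocks, size_t count)
//   {
//       for (size_t i = 0; i < count; ++i)      // X <- (X xor C[i]) * H
//           x = GCM_Multiply_CLMUL(_mm_xor_si128(x, blocks[i]), h, r);
//       return x;
//   }
//
// GCM_AuthenticateBlocks_CLMUL below computes the same recurrence, but
// batches up to eight blocks against precomputed powers of H from mulTable
// and performs a single GCM_Reduce_CLMUL per batch.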
651 
652 void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
653 {
654  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
655  const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
656  __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;
657 
658  unsigned int i;
659  for (i=0; i<tableSize-32; i+=32)
660  {
661  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
662  _mm_storel_epi64(M128_CAST(mulTable+i), h);
663  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
664  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
665  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
666  h = GCM_Multiply_CLMUL(h1, h0, r);
667  }
668 
669  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
670  _mm_storel_epi64(M128_CAST(mulTable+i), h);
671  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
672  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
673  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
674 }
675 
676 size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
677 {
678  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
679  const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
680  const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
681  __m128i x = _mm_load_si128(M128_CAST(hbuffer));
682 
683  while (len >= 16)
684  {
685  size_t i=0, s = UnsignedMin(len/16, 8U);
686  __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
687  __m128i d2 = _mm_shuffle_epi8(d1, m2);
688  __m128i c0 = _mm_setzero_si128();
689  __m128i c1 = _mm_setzero_si128();
690  __m128i c2 = _mm_setzero_si128();
691 
692  while (true)
693  {
694  const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
695  const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
696  const __m128i h2 = _mm_xor_si128(h0, h1);
697 
698  if (++i == s)
699  {
700  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
701  d1 = _mm_xor_si128(d1, x);
702  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
703  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
704  d1 = _mm_xor_si128(d1, SwapWords(d1));
705  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
706  break;
707  }
708 
709  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
710  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
711  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
712  d2 = _mm_xor_si128(d2, d1);
713  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));
714 
715  if (++i == s)
716  {
717  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
718  d1 = _mm_xor_si128(d1, x);
719  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
720  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
721  d1 = _mm_xor_si128(d1, SwapWords(d1));
722  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
723  break;
724  }
725 
726  d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
727  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
728  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
729  d1 = _mm_xor_si128(d1, d2);
730  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
731  }
732  data += s*16;
733  len -= s*16;
734 
735  c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
736  x = GCM_Reduce_CLMUL(c0, c1, c2, r);
737  }
738 
739  _mm_store_si128(M128_CAST(hbuffer), x);
740  return len;
741 }
742 
743 void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
744 {
745  // SSSE3 instruction, but only used with CLMUL
746  const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
747  _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
748  _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
749 }
750 #endif // CRYPTOPP_CLMUL_AVAILABLE
751 
752 // ***************************** POWER8 ***************************** //
753 
754 #if CRYPTOPP_POWER7_AVAILABLE
755 void GCM_Xor16_POWER7(byte *a, const byte *b, const byte *c)
756 {
757  VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
758 }
759 #endif // CRYPTOPP_POWER7_AVAILABLE
760 
761 #if CRYPTOPP_POWER8_VMULL_AVAILABLE
762 
763 uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
764 {
765  const uint64x2_p m1 = {1,1}, m63 = {63,63};
766 
767  c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
768  c1 = VecXor(c1, VMULL_10LE(c0, r));
769  c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
770  c0 = VMULL_00LE(vec_sl(c0, m1), r);
771  c2 = VecXor(c2, c0);
772  c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
773  c1 = vec_sr(vec_mergeh(c1, c2), m63);
774  c2 = vec_sl(c2, m1);
775 
776  return VecXor(c2, c1);
777 }
778 
779 inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
780 {
781  const uint64x2_p c0 = VMULL_00LE(x, h);
782  const uint64x2_p c1 = VecXor(VMULL_01LE(x, h), VMULL_10LE(x, h));
783  const uint64x2_p c2 = VMULL_11LE(x, h);
784 
785  return GCM_Reduce_VMULL(c0, c1, c2, r);
786 }
787 
788 inline uint64x2_p LoadHashKey(const byte *hashKey)
789 {
790 #if (CRYPTOPP_BIG_ENDIAN)
791  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
792  const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
793  return VecPermute(key, key, mask);
794 #else
795  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
796  const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
797  return VecPermute(key, key, mask);
798 #endif
799 }
800 
801 void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
802 {
803  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
804  uint64x2_p h = LoadHashKey(hashKey), h0 = h;
805 
806  unsigned int i;
807  uint64_t temp[2];
808 
809  for (i=0; i<tableSize-32; i+=32)
810  {
811  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
812  VecStore(h, (byte*)temp);
813  std::memcpy(mulTable+i, temp+0, 8);
814  VecStore(h1, mulTable+i+16);
815  VecStore(h, mulTable+i+8);
816  VecStore(h1, (byte*)temp);
817  std::memcpy(mulTable+i+8, temp+0, 8);
818  h = GCM_Multiply_VMULL(h1, h0, r);
819  }
820 
821  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
822  VecStore(h, (byte*)temp);
823  std::memcpy(mulTable+i, temp+0, 8);
824  VecStore(h1, mulTable+i+16);
825  VecStore(h, mulTable+i+8);
826  VecStore(h1, (byte*)temp);
827  std::memcpy(mulTable+i+8, temp+0, 8);
828 }
829 
830 // Swaps high and low 64-bit words
831 template <class T>
832 inline T SwapWords(const T& data)
833 {
834  return (T)VecRotateLeftOctet<8>(data);
835 }
836 
837 inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
838 {
839 #if (CRYPTOPP_BIG_ENDIAN)
840  return (uint64x2_p)VecLoad(dataBuffer);
841 #else
842  const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
843  const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
844  return VecPermute(data, data, mask);
845 #endif
846 }
847 
848 inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
849 {
850 #if (CRYPTOPP_BIG_ENDIAN)
851  return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
852 #else
853  return (uint64x2_p)VecLoadBE(dataBuffer);
854 #endif
855 }
856 
857 size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
858 {
859  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
860  uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);
861 
862  while (len >= 16)
863  {
864  size_t i=0, s = UnsignedMin(len/16, 8U);
865  uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
866  uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};
867 
868  while (true)
869  {
870  const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
871  const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
872  const uint64x2_p h2 = (uint64x2_p)VecXor(h0, h1);
873 
874  if (++i == s)
875  {
876  d1 = LoadBuffer2(data);
877  d1 = VecXor(d1, x);
878  c0 = VecXor(c0, VMULL_00LE(d1, h0));
879  c2 = VecXor(c2, VMULL_01LE(d1, h1));
880  d1 = VecXor(d1, SwapWords(d1));
881  c1 = VecXor(c1, VMULL_00LE(d1, h2));
882  break;
883  }
884 
885  d1 = LoadBuffer1(data+(s-i)*16-8);
886  c0 = VecXor(c0, VMULL_01LE(d2, h0));
887  c2 = VecXor(c2, VMULL_01LE(d1, h1));
888  d2 = VecXor(d2, d1);
889  c1 = VecXor(c1, VMULL_01LE(d2, h2));
890 
891  if (++i == s)
892  {
893  d1 = LoadBuffer2(data);
894  d1 = VecXor(d1, x);
895  c0 = VecXor(c0, VMULL_10LE(d1, h0));
896  c2 = VecXor(c2, VMULL_11LE(d1, h1));
897  d1 = VecXor(d1, SwapWords(d1));
898  c1 = VecXor(c1, VMULL_10LE(d1, h2));
899  break;
900  }
901 
902  d2 = LoadBuffer2(data+(s-i)*16-8);
903  c0 = VecXor(c0, VMULL_10LE(d1, h0));
904  c2 = VecXor(c2, VMULL_10LE(d2, h1));
905  d1 = VecXor(d1, d2);
906  c1 = VecXor(c1, VMULL_10LE(d1, h2));
907  }
908  data += s*16;
909  len -= s*16;
910 
911  c1 = VecXor(VecXor(c1, c0), c2);
912  x = GCM_Reduce_VMULL(c0, c1, c2, r);
913  }
914 
915  VecStore(x, hbuffer);
916  return len;
917 }
918 
919 void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
920 {
921  const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
922  VecStore(VecPermute(VecLoad(hashBuffer), mask), hashBuffer);
923 }
924 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
925 
926 NAMESPACE_END