#ifndef CRYPTOPP_PPC_CRYPTO_H
#define CRYPTOPP_PPC_CRYPTO_H

#include "config.h"
#include "misc.h"

#if defined(__ALTIVEC__)
# include <altivec.h>
# undef vector
# undef pixel
# undef bool
#endif

// IBM XL C/C++ on AIX does not define __CRYPTO__ with -qarch=pwr8 like it
// should. The feature is available in XLC 13.1 and above.
#if defined(_AIX) && defined(_ARCH_PWR8) && (__xlC__ >= 0xd01)
# define __CRYPTO__ 1
#endif

#if defined(__xlc__) && (__xlc__ < 0x0d01)
# define __early_xlc__ 1
#endif
#if defined(__xlC__) && (__xlC__ < 0x0d01)
# define __early_xlC__ 1
#endif

#if CRYPTOPP_GCC_DIAGNOSTIC_AVAILABLE
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wdeprecated"
#endif

NAMESPACE_BEGIN(CryptoPP)

#if defined(__ALTIVEC__) || defined(CRYPTOPP_DOXYGEN_PROCESSING)

/// \brief Vector of 8-bit elements
typedef __vector unsigned char   uint8x16_p;
/// \brief Vector of 16-bit elements
typedef __vector unsigned short  uint16x8_p;
/// \brief Vector of 32-bit elements
typedef __vector unsigned int    uint32x4_p;

#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Vector of 64-bit elements
typedef __vector unsigned long long uint64x2_p;
#endif  // _ARCH_PWR8

/// \brief The 0 vector
inline uint32x4_p VecZero()
{
    const uint32x4_p v = {0,0,0,0};
    return v;
}
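
/// \brief The 1 vector
/// \returns a vector with each 32-bit element set to 1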
inline uint32x4_p VecOne()
{
    const uint32x4_p v = {1,1,1,1};
    return v;
}
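
/// \brief Reverse bytes in a vector
/// \param data the vector
/// \returns a vector with the bytes of data in reverse order
/// \details VecReverse() uses the POWER9 vec_revb built-in when available,
///  and a byte permute otherwise.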
template <class T>
inline T VecReverse(const T data)
{
#if defined(_ARCH_PWR9)
    return (T)vec_revb((uint8x16_p)data);
#else
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return (T)vec_perm(data, data, mask);
#endif
}
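
/// \brief Loads a vector from a byte array
/// \param src the byte array
/// \returns the 128-bit vector from src
/// \details VecLoad_ALTIVEC() uses vec_ld when the effective address is
///  16-byte aligned. Otherwise it performs two aligned loads and combines
///  them with vec_lvsl and vec_perm, following the AltiVec Programming
///  Environments Manual.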
inline uint32x4_p VecLoad_ALTIVEC(const byte src[16])
{
    const uintptr_t eff = reinterpret_cast<uintptr_t>(src)+0;
    if (eff % 16 == 0)
    {
        return (uint32x4_p)vec_ld(0, src);
    }
    else
    {
        // http://www.nxp.com/docs/en/reference-manual/ALTIVECPEM.pdf
        const uint8x16_p perm = vec_lvsl(0, src);
        const uint8x16_p low = vec_ld(0, src);
        const uint8x16_p high = vec_ld(15, src);
        return (uint32x4_p)vec_perm(low, high, perm);
    }
}

inline uint32x4_p VecLoad_ALTIVEC(int off, const byte src[16])
{
    const uintptr_t eff = reinterpret_cast<uintptr_t>(src)+off;
    if (eff % 16 == 0)
    {
        return (uint32x4_p)vec_ld(off, src);
    }
    else
    {
        // http://www.nxp.com/docs/en/reference-manual/ALTIVECPEM.pdf
        // Both aligned loads must be relative to the same effective
        // address src+off used in the alignment check above.
        const uint8x16_p perm = vec_lvsl(off, src);
        const uint8x16_p low = vec_ld(off, src);
        const uint8x16_p high = vec_ld(off+15, src);
        return (uint32x4_p)vec_perm(low, high, perm);
    }
}
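
/// \brief Loads a vector from a byte array
/// \param src the byte array
/// \returns the 128-bit vector from src
/// \details VecLoad() uses POWER8's vec_xl or vec_vsx_ld when available,
///  and falls back to VecLoad_ALTIVEC() otherwise, so src does not have
///  to be 16-byte aligned. A typical sequence loads a block, combines it
///  with a key and stores the result, for example (k is a hypothetical
///  previously-loaded key vector):
///  <tt>uint32x4_p b = VecLoad(ptr); b = VecXor(b, k); VecStore(b, ptr);</tt>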
inline uint32x4_p VecLoad(const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    return (uint32x4_p)vec_xlw4(0, (byte*)src);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl(0, (byte*)src);
# else
    return (uint32x4_p)vec_vsx_ld(0, (byte*)src);
# endif
#else
    return VecLoad_ALTIVEC(src);
#endif
}

inline uint32x4_p VecLoad(int off, const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    return (uint32x4_p)vec_xlw4(off, (byte*)src);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl(off, (byte*)src);
# else
    return (uint32x4_p)vec_vsx_ld(off, (byte*)src);
# endif
#else
    return VecLoad_ALTIVEC(off, src);
#endif
}
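
/// \brief Loads a vector from a word array
/// \param src the word array
/// \returns the 128-bit vector from src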
inline uint32x4_p VecLoad(const word32 src[4])
{
    return VecLoad((const byte*)src);
}

inline uint32x4_p VecLoad(int off, const word32 src[4])
{
    return VecLoad(off, (const byte*)src);
}

#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
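
/// \brief Loads a vector from a double word array
/// \param src the double word array
/// \returns the 128-bit vector from src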
inline uint64x2_p VecLoad(const word64 src[2])
{
    return (uint64x2_p)VecLoad((const byte*)src);
}
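
/// \brief Loads a vector from a double word array
/// \param off offset into the double word array
/// \param src the double word array
/// \returns the 128-bit vector from src+off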
inline uint64x2_p VecLoad(int off, const word64 src[2])
{
    return (uint64x2_p)VecLoad(off, (const byte*)src);
}

#endif  // _ARCH_PWR8
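
/// \brief Loads a vector from an aligned byte array
/// \param src the byte array
/// \returns the 128-bit vector from src
/// \details VecLoadAligned() expects src to have 16-byte alignment; use
///  VecLoad() when the alignment is not known.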
inline uint32x4_p VecLoadAligned(const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    return (uint32x4_p)vec_xlw4(0, (byte*)src);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl(0, (byte*)src);
# else
    return (uint32x4_p)vec_vsx_ld(0, (byte*)src);
# endif
#else  // _ARCH_PWR8
    return (uint32x4_p)vec_ld(0, (byte*)src);
#endif  // _ARCH_PWR8
}

inline uint32x4_p VecLoadAligned(int off, const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    return (uint32x4_p)vec_xlw4(off, (byte*)src);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl(off, (byte*)src);
# else
    return (uint32x4_p)vec_vsx_ld(off, (byte*)src);
# endif
#else  // _ARCH_PWR8
    return (uint32x4_p)vec_ld(off, (byte*)src);
#endif  // _ARCH_PWR8
}
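
/// \brief Loads a vector from a byte array
/// \param src the byte array
/// \returns the 128-bit vector from src
/// \details VecLoadBE() loads a vector and returns it in big-endian
///  presentation: on a little-endian system the bytes are reversed after
///  the load.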
inline uint32x4_p VecLoadBE(const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
#  if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)vec_xlw4(0, (byte*)src);
#  else
    return (uint32x4_p)VecReverse(vec_xlw4(0, (byte*)src));
#  endif
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl_be(0, (byte*)src);
# else
#  if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)vec_vsx_ld(0, (byte*)src);
#  else
    return (uint32x4_p)VecReverse(vec_vsx_ld(0, (byte*)src));
#  endif
# endif
#else  // _ARCH_PWR8
# if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)VecLoad((const byte*)src);
# else
    return (uint32x4_p)VecReverse(VecLoad((const byte*)src));
# endif
#endif  // _ARCH_PWR8
}
inline uint32x4_p VecLoadBE(int off, const byte src[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
#  if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)vec_xlw4(off, (byte*)src);
#  else
    return (uint32x4_p)VecReverse(vec_xlw4(off, (byte*)src));
#  endif
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    return (uint32x4_p)vec_xl_be(off, (byte*)src);
# else
#  if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)vec_vsx_ld(off, (byte*)src);
#  else
    return (uint32x4_p)VecReverse(vec_vsx_ld(off, (byte*)src));
#  endif
# endif
#else  // _ARCH_PWR8
# if (CRYPTOPP_BIG_ENDIAN)
    return (uint32x4_p)VecLoad(off, (const byte*)src);
# else
    return (uint32x4_p)VecReverse(VecLoad(off, (const byte*)src));
# endif
#endif  // _ARCH_PWR8
}
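
/// \brief Stores a vector to a byte array
/// \param data the vector
/// \param dest the byte array
/// \details VecStore_ALTIVEC() uses vec_st when the effective address is
///  16-byte aligned. Otherwise it permutes the data with vec_lvsr and
///  scatters the elements with a series of vec_ste stores, which is
///  expensive compared to an unaligned VSX store.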
template <class T>
inline void VecStore_ALTIVEC(const T data, byte dest[16])
{
    const uintptr_t eff = reinterpret_cast<uintptr_t>(dest)+0;
    if (eff % 16 == 0)
    {
        vec_st((uint8x16_p)data, 0, dest);
    }
    else
    {
        // http://www.nxp.com/docs/en/reference-manual/ALTIVECPEM.pdf
        uint8x16_p perm = (uint8x16_p)vec_perm(data, data, vec_lvsr(0, dest));
        vec_ste((uint8x16_p) perm,  0, (unsigned char*) dest);
        vec_ste((uint16x8_p) perm,  1, (unsigned short*)dest);
        vec_ste((uint32x4_p) perm,  3, (unsigned int*)  dest);
        vec_ste((uint32x4_p) perm,  4, (unsigned int*)  dest);
        vec_ste((uint32x4_p) perm,  8, (unsigned int*)  dest);
        vec_ste((uint32x4_p) perm, 12, (unsigned int*)  dest);
        vec_ste((uint16x8_p) perm, 14, (unsigned short*)dest);
        vec_ste((uint8x16_p) perm, 15, (unsigned char*) dest);
    }
}

template <class T>
inline void VecStore_ALTIVEC(const T data, int off, byte dest[16])
{
    const uintptr_t eff = reinterpret_cast<uintptr_t>(dest)+off;
    if (eff % 16 == 0)
    {
        vec_st((uint8x16_p)data, off, dest);
    }
    else
    {
        // http://www.nxp.com/docs/en/reference-manual/ALTIVECPEM.pdf
        // The element stores must be relative to the same effective
        // address dest+off used in the alignment check above.
        byte* base = dest+off;
        uint8x16_p perm = (uint8x16_p)vec_perm(data, data, vec_lvsr(0, base));
        vec_ste((uint8x16_p) perm,  0, (unsigned char*) base);
        vec_ste((uint16x8_p) perm,  1, (unsigned short*)base);
        vec_ste((uint32x4_p) perm,  3, (unsigned int*)  base);
        vec_ste((uint32x4_p) perm,  4, (unsigned int*)  base);
        vec_ste((uint32x4_p) perm,  8, (unsigned int*)  base);
        vec_ste((uint32x4_p) perm, 12, (unsigned int*)  base);
        vec_ste((uint16x8_p) perm, 14, (unsigned short*)base);
        vec_ste((uint8x16_p) perm, 15, (unsigned char*) base);
    }
}
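
/// \brief Stores a vector to a byte array
/// \param data the vector
/// \param dest the byte array
/// \details VecStore() uses POWER8's vec_xst or vec_vsx_st when available,
///  and falls back to VecStore_ALTIVEC() otherwise, so dest does not have
///  to be 16-byte aligned.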
template <class T>
inline void VecStore(const T data, byte dest[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    vec_xstw4((uint8x16_p)data, 0, (byte*)dest);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    vec_xst((uint8x16_p)data, 0, (byte*)dest);
# else
    vec_vsx_st((uint8x16_p)data, 0, (byte*)dest);
# endif
#else
    VecStore_ALTIVEC(data, (byte*)dest);
#endif
}

template <class T>
inline void VecStore(const T data, int off, byte dest[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
    vec_xstw4((uint8x16_p)data, off, (byte*)dest);
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    vec_xst((uint8x16_p)data, off, (byte*)dest);
# else
    vec_vsx_st((uint8x16_p)data, off, (byte*)dest);
# endif
#else
    VecStore_ALTIVEC(data, off, (byte*)dest);
#endif
}

template <class T>
inline void VecStore(const T data, word32 dest[4])
{
    VecStore((uint8x16_p)data, 0, (byte*)dest);
}

template <class T>
inline void VecStore(const T data, int off, word32 dest[4])
{
    VecStore((uint8x16_p)data, off, (byte*)dest);
}

template <class T>
inline void VecStore(const T data, word64 dest[2])
{
    VecStore((uint8x16_p)data, 0, (byte*)dest);
}

template <class T>
inline void VecStore(const T data, int off, word64 dest[2])
{
    VecStore((uint8x16_p)data, off, (byte*)dest);
}
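
/// \brief Stores a vector to a byte array
/// \param data the vector
/// \param dest the byte array
/// \details VecStoreBE() stores a vector in big-endian byte order,
///  reversing the bytes first on a little-endian system.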
template <class T>
inline void VecStoreBE(const T data, byte dest[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
#  if (CRYPTOPP_BIG_ENDIAN)
    vec_xstw4((uint8x16_p)data, 0, (byte*)dest);
#  else
    vec_xstw4((uint8x16_p)VecReverse(data), 0, (byte*)dest);
#  endif
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    vec_xst_be((uint8x16_p)data, 0, (byte*)dest);
# else
#  if (CRYPTOPP_BIG_ENDIAN)
    vec_vsx_st((uint8x16_p)data, 0, (byte*)dest);
#  else
    vec_vsx_st((uint8x16_p)VecReverse(data), 0, (byte*)dest);
#  endif
# endif
#else  // _ARCH_PWR8
# if (CRYPTOPP_BIG_ENDIAN)
    VecStore((uint8x16_p)data, (byte*)dest);
# else
    VecStore((uint8x16_p)VecReverse(data), (byte*)dest);
# endif
#endif  // _ARCH_PWR8
}

template <class T>
inline void VecStoreBE(const T data, int off, byte dest[16])
{
#if defined(_ARCH_PWR8)
# if defined(__early_xlc__) || defined(__early_xlC__)
#  if (CRYPTOPP_BIG_ENDIAN)
    vec_xstw4((uint8x16_p)data, off, (byte*)dest);
#  else
    vec_xstw4((uint8x16_p)VecReverse(data), off, (byte*)dest);
#  endif
# elif defined(__xlc__) || defined(__xlC__) || defined(__clang__)
    vec_xst_be((uint8x16_p)data, off, (byte*)dest);
# else
#  if (CRYPTOPP_BIG_ENDIAN)
    vec_vsx_st((uint8x16_p)data, off, (byte*)dest);
#  else
    vec_vsx_st((uint8x16_p)VecReverse(data), off, (byte*)dest);
#  endif
# endif
#else  // _ARCH_PWR8
# if (CRYPTOPP_BIG_ENDIAN)
    VecStore((uint8x16_p)data, off, (byte*)dest);
# else
    VecStore((uint8x16_p)VecReverse(data), off, (byte*)dest);
# endif
#endif  // _ARCH_PWR8
}

template <class T>
inline void VecStoreBE(const T data, word32 dest[4])
{
    return VecStoreBE((uint8x16_p)data, (byte*)dest);
}

template <class T>
inline void VecStoreBE(const T data, int off, word32 dest[4])
{
    return VecStoreBE((uint8x16_p)data, off, (byte*)dest);
}
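
/// \brief AND two vectors
/// \returns vec1 & vec2
/// \details The second vector is cast to the type of the first; the result
///  has the type of the first argument.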
template <class T1, class T2>
inline T1 VecAnd(const T1 vec1, const T2 vec2)
{
    return (T1)vec_and(vec1, (T1)vec2);
}
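
/// \brief OR two vectors
/// \returns vec1 | vec2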
template <class T1, class T2>
inline T1 VecOr(const T1 vec1, const T2 vec2)
{
    return (T1)vec_or(vec1, (T1)vec2);
}
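
/// \brief XOR two vectors
/// \returns vec1 ^ vec2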
template <class T1, class T2>
inline T1 VecXor(const T1 vec1, const T2 vec2)
{
    return (T1)vec_xor(vec1, (T1)vec2);
}
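
/// \brief Add two vectors
/// \details VecAdd() performs modular addition of the packed elements of
///  vec1 and vec2; carries are discarded at each element boundary.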
template <class T1, class T2>
inline T1 VecAdd(const T1 vec1, const T2 vec2)
{
    return (T1)vec_add(vec1, (T1)vec2);
}
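
/// \brief Subtract two vectors
/// \details VecSub() performs modular subtraction of the packed elements
///  of vec1 and vec2; borrows are discarded at each element boundary.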
template <class T1, class T2>
inline T1 VecSub(const T1 vec1, const T2 vec2)
{
    return (T1)vec_sub(vec1, (T1)vec2);
}
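
/// \brief Add two vectors as 64-bit elements
/// \details VecAdd64() performs dword addition. POWER8 provides a native
///  64-bit vec_add. POWER7 and below lack one, so the result is built from
///  32-bit adds: vec_addc produces the carries out of words 1 and 3, and a
///  permute shifts those carries into words 0 and 2 before the final add.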
inline uint32x4_p VecAdd64(const uint32x4_p& vec1, const uint32x4_p& vec2)
{
#if defined(_ARCH_PWR8)
    return (uint32x4_p)vec_add((uint64x2_p)vec1, (uint64x2_p)vec2);
#else
    // The carry mask selects carries from elements 1 and 3 and sets the
    // remaining elements to 0. It also shifts the carries left by 4 bytes
    // so they are added to elements 0 and 2.
    const uint8x16_p cmask = {4,5,6,7, 16,16,16,16, 12,13,14,15, 16,16,16,16};
    const uint32x4_p zero = {0, 0, 0, 0};

    uint32x4_p cy = vec_addc(vec1, vec2);
    cy = vec_perm(cy, zero, cmask);
    return vec_add(vec_add(vec1, vec2), cy);
#endif
}
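
/// \brief Permutes a vector
/// \details VecPermute() selects bytes from vec using the byte indices in
///  mask. The two-vector overload below selects bytes from the
///  concatenation of vec1 and vec2.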
template <class T1, class T2>
inline T1 VecPermute(const T1 vec, const T2 mask)
{
    return (T1)vec_perm(vec, vec, (uint8x16_p)mask);
}

template <class T1, class T2>
inline T1 VecPermute(const T1 vec1, const T1 vec2, const T2 mask)
{
    return (T1)vec_perm(vec1, (T1)vec2, (uint8x16_p)mask);
}
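
/// \brief Shift a vector left
/// \tparam C the number of bytes to shift
/// \details VecShiftLeftOctet() shifts the whole vector left by C bytes,
///  shifting in zeros. The vec_sld operand order and shift count differ by
///  endianness so the same logical result is produced on both systems.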
template <unsigned int C, class T>
inline T VecShiftLeftOctet(const T vec)
{
    const T zero = {0};
    if (C >= 16)
    {
        // Out of range
        return zero;
    }
    else if (C == 0)
    {
        // Noop
        return vec;
    }
    else
    {
#if (CRYPTOPP_BIG_ENDIAN)
        enum { R=C&0xf };
        return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, R);
#else
        enum { R=(16-C)&0xf };
        return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, R);
#endif
    }
}
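
/// \brief Shift a vector right
/// \tparam C the number of bytes to shift
/// \details VecShiftRightOctet() shifts the whole vector right by C bytes,
///  shifting in zeros.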
template <unsigned int C, class T>
inline T VecShiftRightOctet(const T vec)
{
    const T zero = {0};
    if (C >= 16)
    {
        // Out of range
        return zero;
    }
    else if (C == 0)
    {
        // Noop
        return vec;
    }
    else
    {
#if (CRYPTOPP_BIG_ENDIAN)
        enum { R=(16-C)&0xf };
        return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, R);
#else
        enum { R=C&0xf };
        return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, R);
#endif
    }
}
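
/// \brief Rotate a vector left
/// \tparam C the number of bytes to rotate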
template <unsigned int C, class T>
inline T VecRotateLeftOctet(const T vec)
{
#if (CRYPTOPP_BIG_ENDIAN)
    enum { R=C&0xf };
    return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#else
    enum { R=(16-C)&0xf };
    return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#endif
}
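
/// \brief Rotate a vector right
/// \tparam C the number of bytes to rotate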
template <unsigned int C, class T>
inline T VecRotateRightOctet(const T vec)
{
#if (CRYPTOPP_BIG_ENDIAN)
    enum { R=(16-C)&0xf };
    return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#else
    enum { R=C&0xf };
    return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#endif
}
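
/// \brief Rotate a packed vector left
/// \tparam C the number of bits to rotate
/// \details VecRotateLeft() rotates each 32-bit element of vec left by C bits.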
template<unsigned int C>
inline uint32x4_p VecRotateLeft(const uint32x4_p vec)
{
    const uint32x4_p m = {C, C, C, C};
    return vec_rl(vec, m);
}

template<unsigned int C>
inline uint32x4_p VecShiftLeft(const uint32x4_p vec)
{
    const uint32x4_p m = {C, C, C, C};
    return vec_sl(vec, m);
}
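
/// \brief Merge two vectors
/// \details VecMergeHigh() interleaves the high halves of vec1 and vec2;
///  VecMergeLow(), below, interleaves the low halves.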
template <class T>
inline T VecMergeHigh(const T vec1, const T vec2)
{
    return vec_mergeh(vec1, vec2);
}

template <class T>
inline T VecMergeLow(const T vec1, const T vec2)
{
    return vec_mergel(vec1, vec2);
}

#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)

template<unsigned int C>
inline uint64x2_p VecRotateLeft(const uint64x2_p vec)
{
    const uint64x2_p m = {C, C};
    return vec_rl(vec, m);
}

template<unsigned int C>
inline uint64x2_p VecShiftLeft(const uint64x2_p vec)
{
    const uint64x2_p m = {C, C};
    return vec_sl(vec, m);
}

#endif  // _ARCH_PWR8
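
/// \brief Rotate a packed vector right
/// \tparam C the number of bits to rotate
/// \details AltiVec provides only a rotate-left instruction, so a right
///  rotation of 32-bit elements by C is performed as a left rotation by 32-C.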
template<unsigned int C>
inline uint32x4_p VecRotateRight(const uint32x4_p vec)
{
    const uint32x4_p m = {32-C, 32-C, 32-C, 32-C};
    return vec_rl(vec, m);
}

template<unsigned int C>
inline uint32x4_p VecShiftRight(const uint32x4_p vec)
{
    const uint32x4_p m = {C, C, C, C};
    return vec_sr(vec, m);
}

#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)

template<unsigned int C>
inline uint64x2_p VecRotateRight(const uint64x2_p vec)
{
    const uint64x2_p m = {64-C, 64-C};
    return vec_rl(vec, m);
}

template<unsigned int C>
inline uint64x2_p VecShiftRight(const uint64x2_p vec)
{
    const uint64x2_p m = {C, C};
    return vec_sr(vec, m);
}

#endif  // _ARCH_PWR8
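
/// \brief Exchange high and low double words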
template <class T>
inline T VecSwapWords(const T vec)
{
    return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, 8);
}
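
/// \brief Extract a dword from a vector
/// \details VecGetLow() returns the low dword of val, zero-extended to a
///  full vector. VecGetHigh(), below, returns the zero-extended high dword.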
template <class T>
inline T VecGetLow(const T val)
{
#if (CRYPTOPP_BIG_ENDIAN) && (_ARCH_PWR8)
    const T zero = {0};
    return (T)VecMergeLow((uint64x2_p)zero, (uint64x2_p)val);
#else
    return VecShiftRightOctet<8>(VecShiftLeftOctet<8>(val));
#endif
}

template <class T>
inline T VecGetHigh(const T val)
{
#if (CRYPTOPP_BIG_ENDIAN) && (_ARCH_PWR8)
    const T zero = {0};
    return (T)VecMergeHigh((uint64x2_p)zero, (uint64x2_p)val);
#else
    return VecShiftRightOctet<8>(val);
#endif
}
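
/// \brief Compare two vectors
/// \details VecEqual() returns true if all elements of vec1 and vec2 are
///  equal; VecNotEqual(), below, returns true if any element differs.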
template <class T1, class T2>
inline bool VecEqual(const T1 vec1, const T2 vec2)
{
    return 1 == vec_all_eq((uint32x4_p)vec1, (uint32x4_p)vec2);
}

template <class T1, class T2>
inline bool VecNotEqual(const T1 vec1, const T2 vec2)
{
    return 0 == vec_all_eq((uint32x4_p)vec1, (uint32x4_p)vec2);
}

#if defined(__CRYPTO__) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
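
/// \brief Polynomial multiplication
/// \details VecPolyMultiply() performs carryless multiplication using the
///  POWER8 vpmsum built-ins. The built-in name differs by compiler.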
inline uint32x4_p VecPolyMultiply(const uint32x4_p& a, const uint32x4_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return __vpmsumw (a, b);
#elif defined(__clang__)
    return __builtin_altivec_crypto_vpmsumw (a, b);
#else
    return __builtin_crypto_vpmsumw (a, b);
#endif
}

inline uint64x2_p VecPolyMultiply(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return __vpmsumd (a, b);
#elif defined(__clang__)
    return __builtin_altivec_crypto_vpmsumd (a, b);
#else
    return __builtin_crypto_vpmsumd (a, b);
#endif
}
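
/// \brief Polynomial multiplication
/// \details VecPolyMultiply00LE(), VecPolyMultiply01LE(), VecPolyMultiply10LE()
///  and VecPolyMultiply11LE() present the product the way Intel's
///  <tt>_mm_clmulepi64_si128</tt> does for immediates 0x00, 0x01, 0x10 and
///  0x11. Each selects the required dwords with VecGetLow() or VecGetHigh()
///  and swaps words on a big-endian system so GCM code ports cleanly.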
inline uint64x2_p VecPolyMultiply00LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return VecSwapWords(VecPolyMultiply(VecGetHigh(a), VecGetHigh(b)));
#else
    return VecPolyMultiply(VecGetHigh(a), VecGetHigh(b));
#endif
}

inline uint64x2_p VecPolyMultiply01LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return VecSwapWords(VecPolyMultiply(a, VecGetHigh(b)));
#else
    return VecPolyMultiply(a, VecGetHigh(b));
#endif
}

inline uint64x2_p VecPolyMultiply10LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return VecSwapWords(VecPolyMultiply(VecGetHigh(a), b));
#else
    return VecPolyMultiply(VecGetHigh(a), b);
#endif
}

inline uint64x2_p VecPolyMultiply11LE(const uint64x2_p& a, const uint64x2_p& b)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return VecSwapWords(VecPolyMultiply(VecGetLow(a), b));
#else
    return VecPolyMultiply(VecGetLow(a), b);
#endif
}
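
/// \brief One round of AES encryption
/// \details VecEncrypt() performs one round of AES encryption of state
///  using subkey key. The built-in differs by compiler: __vcipher on IBM
///  XL C/C++, and the vcipher builtins on Clang and GCC.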
template <class T1, class T2>
inline T1 VecEncrypt(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T1)__vcipher((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
    return (T1)__builtin_altivec_crypto_vcipher((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
    return (T1)__builtin_crypto_vcipher((uint64x2_p)state, (uint64x2_p)key);
#else
    CRYPTOPP_ASSERT(0);
#endif
}
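
/// \brief Final round of AES encryption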
template <class T1, class T2>
inline T1 VecEncryptLast(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T1)__vcipherlast((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
    return (T1)__builtin_altivec_crypto_vcipherlast((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
    return (T1)__builtin_crypto_vcipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
    CRYPTOPP_ASSERT(0);
#endif
}
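
/// \brief One round of AES decryption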
template <class T1, class T2>
inline T1 VecDecrypt(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T1)__vncipher((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
    return (T1)__builtin_altivec_crypto_vncipher((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
    return (T1)__builtin_crypto_vncipher((uint64x2_p)state, (uint64x2_p)key);
#else
    CRYPTOPP_ASSERT(0);
#endif
}
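
/// \brief Final round of AES decryption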
template <class T1, class T2>
inline T1 VecDecryptLast(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T1)__vncipherlast((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
    return (T1)__builtin_altivec_crypto_vncipherlast((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
    return (T1)__builtin_crypto_vncipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
    CRYPTOPP_ASSERT(0);
#endif
}
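
/// \brief SHA256 Sigma functions
/// \details VecSHA256() selects the SHA256 Sigma function with the func
///  and fmask template arguments and applies it to each element of vec.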
template <int func, int fmask, class T>
inline T VecSHA256(const T vec)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T)__vshasigmaw((uint32x4_p)vec, func, fmask);
#elif defined(__clang__)
    return (T)__builtin_altivec_crypto_vshasigmaw((uint32x4_p)vec, func, fmask);
#elif defined(__GNUC__)
    return (T)__builtin_crypto_vshasigmaw((uint32x4_p)vec, func, fmask);
#else
    CRYPTOPP_ASSERT(0);
#endif
}
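
/// \brief SHA512 Sigma functions
/// \details VecSHA512() selects the SHA512 Sigma function with the func
///  and fmask template arguments and applies it to each element of vec.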
template <int func, int fmask, class T>
inline T VecSHA512(const T vec)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
    return (T)__vshasigmad((uint64x2_p)vec, func, fmask);
#elif defined(__clang__)
    return (T)__builtin_altivec_crypto_vshasigmad((uint64x2_p)vec, func, fmask);
#elif defined(__GNUC__)
    return (T)__builtin_crypto_vshasigmad((uint64x2_p)vec, func, fmask);
#else
    CRYPTOPP_ASSERT(0);
#endif
}

#endif  // __CRYPTO__

#endif  // __ALTIVEC__

NAMESPACE_END

#if CRYPTOPP_GCC_DIAGNOSTIC_AVAILABLE
# pragma GCC diagnostic pop
#endif

#endif  // CRYPTOPP_PPC_CRYPTO_H