Export of internal Abseil changes.

--
c321829735accc2e6beb81e6a5a4421e5647b876 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::swap(InlinedVector&) to be exception safe and adds exception safety tests.

PiperOrigin-RevId: 255511536
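
As a minimal sketch of the case swap must handle (illustrative only, not the
commit's test): one vector inlined and one heap-allocated, so elements cross
storage representations during the exchange.

#include <cassert>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> small = {1, 2, 3};        // fits inline
  absl::InlinedVector<int, 4> large = {4, 5, 6, 7, 8};  // spilled to the heap

  small.swap(large);

  assert(small.size() == 5 && small[0] == 4);
  assert(large.size() == 3 && large[0] == 1);
  // Exception safety requires that a swap interrupted by a throwing element
  // move leave both vectors in valid, destructible states.
  return 0;
}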

--
0d86445891748efb09430eb9ede267b54185a246 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::erase(...) to be exception safe and adds an exception safety test for it.

PiperOrigin-RevId: 255492671
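
A usage sketch of the erase overload in question (plain public API, not the
new test):

#include <cassert>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v = {10, 20, 30, 40};

  // erase shifts the trailing elements left and returns an iterator to the
  // element that now occupies the erased position.
  auto it = v.erase(v.begin() + 1);  // removes 20

  assert(*it == 30);
  assert(v.size() == 3);
  // Exception safety requires that a throwing element move during the shift
  // not leave v with an inconsistent size.
  return 0;
}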

--
f07e8fa62dfe9eb0d025b27fca8c6db43c5a328f by CJ Johnson <johnsoncj@google.com>:

Updates the implementation of InlinedVector::emplace_back(...) to be exception safe and adds exception safety tests.

PiperOrigin-RevId: 255422837
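
A sketch of the growth path emplace_back must keep safe (illustrative,
assuming nothing beyond the public API):

#include <cassert>
#include <string>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<std::string, 2> v;
  v.emplace_back(3, 'x');  // constructs "xxx" in place; still inline
  v.emplace_back(3, 'y');  // inline capacity (2) is now full

  // This call must move to heap storage; if the allocation or an element
  // move throws, an exception-safe emplace_back leaves v in a valid state.
  v.emplace_back(3, 'z');

  assert(v.size() == 3);
  assert(v[2] == "zzz");
  return 0;
}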

--
4c3be92bfe4c1636a03cef8fd5aa802fed0d2c61 by Abseil Team <absl-team@google.com>:

Internal Change

PiperOrigin-RevId: 255422693

--
6df38ea42f00678c357a539016163f8ac4c084e6 by Gennadiy Rozental <rogeeff@google.com>:

Introduce public interfaces for setting and getting program usage messages.

PiperOrigin-RevId: 255291467
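
The diff for this change is not among the files shown below. The interfaces
are presumably the SetProgramUsageMessage()/ProgramUsageMessage() pair that
lives in absl/flags/usage.h in current Abseil; a sketch of their intended
use, assuming those names:

#include "absl/flags/parse.h"
#include "absl/flags/usage.h"
#include "absl/strings/string_view.h"

int main(int argc, char* argv[]) {
  // Set the message printed for --help before parsing the command line.
  absl::SetProgramUsageMessage(
      "Frobnicates widgets. Sample usage:\n  frobnicate --widget=<name>");
  absl::ParseCommandLine(argc, argv);

  // The message can be read back later, e.g. when emitting custom help text.
  absl::string_view usage = absl::ProgramUsageMessage();
  (void)usage;
  return 0;
}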

--
8f21d594aed3971d37db70226847c693eb548edb by Laramie Leavitt <lar@google.com>:

Move absl/random's copies of ABSL_ATTRIBUTE_ALWAYS_INLINE and
ABSL_ATTRIBUTE_NEVER_INLINE into .cc files and rename them to
prevent conflicts.

https://github.com/abseil/abseil-cpp/issues/343

PiperOrigin-RevId: 255288599

--
6b7430ad0c8bd860fb9394894f5eeedd1acc9f77 by CJ Johnson <johnsoncj@google.com>:

Updates the ScopedAllocatorWorks test for InlinedVector so that it no longer relies on the exact number of bytes allocated by the standard library.

In doing so, removes the LegacyNextCapacityFrom(...) implementation function from InlinedVector.

Also applies clang-format to the test file.
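
A hedged sketch of the resulting test shape, using a hypothetical
CountingAllocator (the test's actual allocator instrumentation is not shown
in this export). Counting allocation calls rather than bytes keeps the
assertion independent of the standard library's growth policy:

#include <cassert>
#include <cstddef>
#include <memory>

#include "absl/container/inlined_vector.h"

// Hypothetical allocator that counts calls to allocate().
template <typename T>
struct CountingAllocator {
  using value_type = T;

  CountingAllocator() = default;
  explicit CountingAllocator(int* count) : count(count) {}
  template <typename U>
  CountingAllocator(const CountingAllocator<U>& other) : count(other.count) {}

  T* allocate(std::size_t n) {
    if (count != nullptr) ++*count;
    return std::allocator<T>().allocate(n);
  }
  void deallocate(T* p, std::size_t n) {
    std::allocator<T>().deallocate(p, n);
  }

  int* count = nullptr;
};

template <typename T, typename U>
bool operator==(const CountingAllocator<T>& a, const CountingAllocator<U>& b) {
  return a.count == b.count;
}
template <typename T, typename U>
bool operator!=(const CountingAllocator<T>& a, const CountingAllocator<U>& b) {
  return !(a == b);
}

int main() {
  int allocations = 0;
  absl::InlinedVector<int, 4, CountingAllocator<int>> v(
      CountingAllocator<int>(&allocations));

  for (int i = 0; i < 100; ++i) v.push_back(i);

  // The vector outgrew its inline storage, so the scoped allocator must have
  // been used; asserting > 0 avoids hard-coding any byte count.
  assert(allocations > 0);
  return 0;
}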

PiperOrigin-RevId: 255207606
GitOrigin-RevId: c321829735accc2e6beb81e6a5a4421e5647b876
Change-Id: I7438211c36c4549fca2e866658f8d579c65d7d52
Author: Abseil Team
Date: 2019-06-27 17:24:26 -07:00
Committed by: Shaindel Schwartz
Parent: 72e09a54d9
Commit: c964fcffac
16 changed files with 518 additions and 367 deletions

[changed file; path not preserved]

@@ -59,6 +59,24 @@
#include <time.h> // NOLINT
#endif
// ABSL_HAVE_ATTRIBUTE
#if !defined(ABSL_HAVE_ATTRIBUTE)
#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif
#endif
// ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
#else
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE
#endif
namespace absl {
namespace random_internal_nanobenchmark {
namespace {
@@ -658,8 +676,8 @@ Ticks TotalDuration(const Func func, const void* arg, const InputVec* inputs,
}
// (Nearly) empty Func for measuring timer overhead/resolution.
ABSL_ATTRIBUTE_NEVER_INLINE FuncOutput EmptyFunc(const void* arg,
const FuncInput input) {
ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE FuncOutput
EmptyFunc(const void* arg, const FuncInput input) {
return input;
}

[changed file; path not preserved]

@@ -81,50 +81,8 @@
// Attribute Checks
// -----------------------------------------------------------------------------
// ABSL_HAVE_ATTRIBUTE
#undef ABSL_HAVE_ATTRIBUTE
#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif
// ABSL_ATTRIBUTE_ALWAYS_INLINE forces inlining of the method.
#undef ABSL_ATTRIBUTE_ALWAYS_INLINE
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#elif defined(_MSC_VER)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword, however this is not perfect. MSVC is
// much less aggressive about inlining, and even with the __forceinline keyword.
#define ABSL_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define ABSL_ATTRIBUTE_ALWAYS_INLINE
#endif
// ABSL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
#undef ABSL_ATTRIBUTE_NEVER_INLINE
#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define ABSL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
#else
#define ABSL_ATTRIBUTE_NEVER_INLINE
#endif
// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
// the indicated function.
#undef ABSL_ATTRIBUTE_FLATTEN
#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
#else
#define ABSL_ATTRIBUTE_FLATTEN
#endif
// ABSL_RANDOM_INTERNAL_RESTRICT annotates whether pointers may be considered
// to be unaliased.
#undef ABSL_RANDOM_INTERNAL_RESTRICT
#if defined(__clang__) || defined(__GNUC__)
#define ABSL_RANDOM_INTERNAL_RESTRICT __restrict__
#elif defined(_MSC_VER)

[changed file; path not preserved]

@@ -24,6 +24,37 @@
#include "absl/random/internal/platform.h"
// ABSL_HAVE_ATTRIBUTE
#if !defined(ABSL_HAVE_ATTRIBUTE)
#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif
#endif
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
__attribute__((always_inline))
#elif defined(_MSC_VER)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword, however this is not perfect. MSVC is
// much less aggressive about inlining, and even with the __forceinline keyword.
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
#endif
// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
// the indicated function.
#undef ABSL_ATTRIBUTE_FLATTEN
#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
#else
#define ABSL_ATTRIBUTE_FLATTEN
#endif
// ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
// a hardware accelerated implementation of randen, or whether it
// will contain stubs that exit the process.
@@ -160,7 +191,7 @@ using Vector128 = __vector unsigned long long; // NOLINT(runtime/int)
namespace {
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
ReverseBytes(const Vector128& v) {
// Reverses the bytes of the vector.
const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -171,26 +202,26 @@ ReverseBytes(const Vector128& v) {
// WARNING: these load/store in native byte order. It is OK to load and then
// store an unchanged vector, but interpreting the bits as a number or input
// to AES will have undefined results.
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
return vec_vsx_ld(0, reinterpret_cast<const Vector128*>(from));
}
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
vec_vsx_st(v, 0, reinterpret_cast<Vector128*>(to));
}
// One round of AES. "round_key" is a public constant for breaking the
// symmetry of AES (ensures previously equal columns differ afterwards).
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
AesRound(const Vector128& state, const Vector128& round_key) {
return Vector128(__builtin_crypto_vcipher(state, round_key));
}
// Enables native loads in the round loop by pre-swapping.
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
using absl::random_internal::RandenTraits;
constexpr size_t kLanes = 2;
constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
@@ -242,19 +273,19 @@ using Vector128 = uint8x16_t;
namespace {
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
return vld1q_u8(reinterpret_cast<const uint8_t*>(from));
}
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
vst1q_u8(reinterpret_cast<uint8_t*>(to), v);
}
// One round of AES. "round_key" is a public constant for breaking the
// symmetry of AES (ensures previously equal columns differ afterwards).
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
AesRound(const Vector128& state, const Vector128& round_key) {
// It is important to always use the full round function - omitting the
// final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -266,8 +297,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
}
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
} // namespace
@@ -282,13 +313,15 @@ namespace {
class Vector128 {
public:
// Convert from/to intrinsics.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
const __m128i& Vector128)
: data_(Vector128) {}
inline ABSL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const { return data_; }
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const {
return data_;
}
inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
const Vector128& other) {
data_ = _mm_xor_si128(data_, other.data());
return *this;
@@ -298,20 +331,20 @@ class Vector128 {
__m128i data_;
};
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
return Vector128(_mm_load_si128(reinterpret_cast<const __m128i*>(from)));
}
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
_mm_store_si128(reinterpret_cast<__m128i * ABSL_RANDOM_INTERNAL_RESTRICT>(to),
v.data());
}
// One round of AES. "round_key" is a public constant for breaking the
// symmetry of AES (ensures previously equal columns differ afterwards).
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
AesRound(const Vector128& state, const Vector128& round_key) {
// It is important to always use the full round function - omitting the
// final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -319,8 +352,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
}
inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
} // namespace
@@ -417,8 +450,8 @@ constexpr size_t kLanes = 2;
// Block shuffles applies a shuffle to the entire state between AES rounds.
// Improved odd-even shuffle from "New criterion for diffusion property".
inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
BlockShuffle(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
constexpr size_t shuffle[kFeistelBlocks] = {7, 2, 13, 4, 11, 8, 3, 6,
@@ -466,9 +499,10 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
// per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
// parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
// XORs are 'free' (included in the second AES instruction).
inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const u64x2*
FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const
u64x2*
FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
// MSVC does a horrible job at unrolling loops.
@@ -527,9 +561,9 @@ FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
// Indistinguishable from ideal by chosen-ciphertext adversaries using less than
// 2^64 queries if the round function is a PRF. This is similar to the b=8 case
// of Simpira v2, but more efficient than its generic construction for b=16.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void Permute(
const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
Permute(const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
static_cast<const u64x2*>(keys);

[changed file; path not preserved]

@@ -20,6 +20,28 @@
#include "absl/random/internal/platform.h"
// ABSL_HAVE_ATTRIBUTE
#if !defined(ABSL_HAVE_ATTRIBUTE)
#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif
#endif
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
__attribute__((always_inline))
#elif defined(_MSC_VER)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword, however this is not perfect. MSVC is
// much less aggressive about inlining, and even with the __forceinline keyword.
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
#endif
namespace {
// AES portions based on rijndael-alg-fst.c,
@@ -222,7 +244,7 @@ struct alignas(16) u64x2 {
// as an underlying vector register.
//
struct Vector128 {
inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
const Vector128& other) {
s[0] ^= other.s[0];
s[1] ^= other.s[1];
@@ -234,7 +256,7 @@ struct Vector128 {
uint32_t s[4];
};
inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
Vector128 result;
const uint8_t* ABSL_RANDOM_INTERNAL_RESTRICT src =
@@ -259,7 +281,7 @@ Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
return result;
}
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
uint8_t* dst = reinterpret_cast<uint8_t*>(to);
dst[0] = static_cast<uint8_t>(v.s[0] >> 24);
@@ -282,7 +304,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
// One round of AES. "round_key" is a public constant for breaking the
// symmetry of AES (ensures previously equal columns differ afterwards).
inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
AesRound(const Vector128& state, const Vector128& round_key) {
// clang-format off
Vector128 result;
@@ -348,7 +370,7 @@ static_assert(kKeys == kRoundKeys, "kKeys and kRoundKeys must be equal");
static constexpr size_t kLanes = 2;
// The improved Feistel block shuffle function for 16 blocks.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state_u64) {
static_assert(kFeistelBlocks == 16,
"Feistel block shuffle only works for 16 blocks.");
@@ -409,7 +431,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
// per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
// parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
// XORs are 'free' (included in the second AES instruction).
inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
for (size_t branch = 0; branch < kFeistelBlocks; branch += 4) {
@@ -435,7 +457,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
// Indistinguishable from ideal by chosen-ciphertext adversaries using less than
// 2^64 queries if the round function is a PRF. This is similar to the b=8 case
// of Simpira v2, but more efficient than its generic construction for b=16.
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Permute(
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
const void* keys, uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
static_cast<const u64x2*>(keys);