merge(3p/absl): subtree merge of Abseil up to e19260f

... notably, this includes Abseil's own StatusOr type, which
conflicted with our implementation (taken from TensorFlow).

Change-Id: Ie7d6764b64055caaeb8dc7b6b9d066291e6b538f
Author: Vincent Ambo 2020-11-21 14:43:54 +01:00
parent cc27324d02
commit 082c006c04
854 changed files with 11260 additions and 5296 deletions


@ -30,7 +30,7 @@ package(default_visibility = [
"//absl/random:__pkg__",
])
licenses(["notice"]) # Apache 2.0
licenses(["notice"])
cc_library(
name = "traits",
@ -59,7 +59,10 @@ cc_library(
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"],
deps = [
"//absl/base:config",
"//absl/meta:type_traits",
],
)
cc_library(
@ -96,6 +99,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = select({
"//absl:windows": [],
"//absl:wasm": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
deps = [
@ -319,10 +323,6 @@ cc_library(
"//absl:windows": [],
"//conditions:default": ["-Wno-pass-failed"],
}),
# copts in RANDEN_HWAES_COPTS can make this target unusable as a module
# leading to a Clang diagnostic. Furthermore, it only has a private header
# anyway and thus there wouldn't be any gain from using it as a module.
features = ["-header_modules"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":platform",
@ -716,3 +716,15 @@ cc_test(
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "uniform_helper_test",
size = "small",
srcs = ["uniform_helper_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":uniform_helper",
"@com_google_googletest//:gtest_main",
],
)


@ -21,6 +21,7 @@
#include <type_traits>
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@ -38,28 +39,17 @@ constexpr bool IsPowerOfTwoOrZero(UIntType n) {
template <typename URBG>
constexpr typename URBG::result_type RangeSize() {
using result_type = typename URBG::result_type;
static_assert((URBG::max)() != (URBG::min)(), "URBG range cannot be 0.");
return ((URBG::max)() == (std::numeric_limits<result_type>::max)() &&
(URBG::min)() == std::numeric_limits<result_type>::lowest())
? result_type{0}
: (URBG::max)() - (URBG::min)() + result_type{1};
}
template <typename UIntType>
constexpr UIntType LargestPowerOfTwoLessThanOrEqualTo(UIntType n) {
return n < 2 ? n : 2 * LargestPowerOfTwoLessThanOrEqualTo(n / 2);
}
// Given a URBG generating values in the closed interval [Lo, Hi], returns the
// largest power of two less than or equal to `Hi - Lo + 1`.
template <typename URBG>
constexpr typename URBG::result_type PowerOfTwoSubRangeSize() {
return LargestPowerOfTwoLessThanOrEqualTo(RangeSize<URBG>());
: ((URBG::max)() - (URBG::min)() + result_type{1});
}
// Computes the floor of the log (i.e., std::floor(std::log2(N))).
template <typename UIntType>
constexpr UIntType IntegerLog2(UIntType n) {
return (n <= 1) ? 0 : 1 + IntegerLog2(n / 2);
return (n <= 1) ? 0 : 1 + IntegerLog2(n >> 1);
}
// Returns the number of bits of randomness returned through
@ -68,18 +58,23 @@ template <typename URBG>
constexpr size_t NumBits() {
return RangeSize<URBG>() == 0
? std::numeric_limits<typename URBG::result_type>::digits
: IntegerLog2(PowerOfTwoSubRangeSize<URBG>());
: IntegerLog2(RangeSize<URBG>());
}
// Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
// If `n == 0`, all bits are set.
template <typename UIntType>
constexpr UIntType MaskFromShift(UIntType n) {
constexpr UIntType MaskFromShift(size_t n) {
return ((n % std::numeric_limits<UIntType>::digits) == 0)
? ~UIntType{0}
: (UIntType{1} << n) - UIntType{1};
}
// Tags used to dispatch FastUniformBits::generate to the simple or more complex
// entropy extraction algorithm.
struct SimplifiedLoopTag {};
struct RejectionLoopTag {};
// FastUniformBits implements a fast path to acquire uniform independent bits
// from a type which conforms to the [rand.req.urbg] concept.
// Parameterized by:
@ -107,50 +102,16 @@ class FastUniformBits {
"Class-template FastUniformBits<> must be parameterized using "
"an unsigned type.");
// PowerOfTwoVariate() generates a single random variate, always returning a
// value in the half-open interval `[0, PowerOfTwoSubRangeSize<URBG>())`. If
// the URBG already generates values in a power-of-two range, the generator
// itself is used. Otherwise, we use rejection sampling on the largest
// possible power-of-two-sized subrange.
struct PowerOfTwoTag {};
struct RejectionSamplingTag {};
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g) { // NOLINT(runtime/references)
using tag =
typename std::conditional<IsPowerOfTwoOrZero(RangeSize<URBG>()),
PowerOfTwoTag, RejectionSamplingTag>::type;
return PowerOfTwoVariate(g, tag{});
}
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g, // NOLINT(runtime/references)
PowerOfTwoTag) {
return g() - (URBG::min)();
}
template <typename URBG>
static typename URBG::result_type PowerOfTwoVariate(
URBG& g, // NOLINT(runtime/references)
RejectionSamplingTag) {
// Use rejection sampling to ensure uniformity across the range.
typename URBG::result_type u;
do {
u = g() - (URBG::min)();
} while (u >= PowerOfTwoSubRangeSize<URBG>());
return u;
}
// Generate() generates a random value, dispatched on whether
// the underlying URBG must loop over multiple calls or not.
// the underlying URBG must use rejection sampling to generate a value,
// or whether a simplified loop will suffice.
template <typename URBG>
result_type Generate(URBG& g, // NOLINT(runtime/references)
std::true_type /* avoid_looping */);
SimplifiedLoopTag);
template <typename URBG>
result_type Generate(URBG& g, // NOLINT(runtime/references)
std::false_type /* avoid_looping */);
RejectionLoopTag);
};
template <typename UIntType>
@ -162,31 +123,47 @@ FastUniformBits<UIntType>::operator()(URBG& g) { // NOLINT(runtime/references)
// Y = (2 ^ kRange) - 1
static_assert((URBG::max)() > (URBG::min)(),
"URBG::max and URBG::min may not be equal.");
using tag = absl::conditional_t<IsPowerOfTwoOrZero(RangeSize<URBG>()),
SimplifiedLoopTag, RejectionLoopTag>;
return Generate(g, tag{});
}
template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
SimplifiedLoopTag) {
// The simplified version of FastUniformBits works only on URBGs that have
// a range that is a power of 2. In this case we simply loop and shift without
// attempting to balance the bits across calls.
static_assert(IsPowerOfTwoOrZero(RangeSize<URBG>()),
"incorrect Generate tag for URBG instance");
static constexpr size_t kResultBits =
std::numeric_limits<result_type>::digits;
static constexpr size_t kUrbgBits = NumBits<URBG>();
static constexpr size_t kIters =
(kResultBits / kUrbgBits) + (kResultBits % kUrbgBits != 0);
static constexpr size_t kShift = (kIters == 1) ? 0 : kUrbgBits;
static constexpr auto kMin = (URBG::min)();
result_type r = static_cast<result_type>(g() - kMin);
for (size_t n = 1; n < kIters; ++n) {
r = (r << kShift) + static_cast<result_type>(g() - kMin);
}
return r;
}
template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
RejectionLoopTag) {
static_assert(!IsPowerOfTwoOrZero(RangeSize<URBG>()),
"incorrect Generate tag for URBG instance");
using urbg_result_type = typename URBG::result_type;
constexpr urbg_result_type kRangeMask =
RangeSize<URBG>() == 0
? (std::numeric_limits<urbg_result_type>::max)()
: static_cast<urbg_result_type>(PowerOfTwoSubRangeSize<URBG>() - 1);
return Generate(g, std::integral_constant<bool, (kRangeMask >= (max)())>{});
}
template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
std::true_type /* avoid_looping */) {
// The width of the result_type is less than than the width of the random bits
// provided by URBG. Thus, generate a single value and then simply mask off
// the required bits.
return PowerOfTwoVariate(g) & (max)();
}
template <typename UIntType>
template <typename URBG>
typename FastUniformBits<UIntType>::result_type
FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
std::false_type /* avoid_looping */) {
// See [rand.adapt.ibits] for more details on the constants calculated below.
//
// It is preferable to use roughly the same number of bits from each generator
@ -199,21 +176,44 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
// `kSmallIters` and `kLargeIters` times respectively such
// that
//
// `kTotalWidth == kSmallIters * kSmallWidth
// + kLargeIters * kLargeWidth`
// `kResultBits == kSmallIters * kSmallBits
// + kLargeIters * kLargeBits`
//
// where `kTotalWidth` is the total number of bits in `result_type`.
// where `kResultBits` is the total number of bits in `result_type`.
//
constexpr size_t kTotalWidth = std::numeric_limits<result_type>::digits;
constexpr size_t kUrbgWidth = NumBits<URBG>();
constexpr size_t kTotalIters =
kTotalWidth / kUrbgWidth + (kTotalWidth % kUrbgWidth != 0);
constexpr size_t kSmallWidth = kTotalWidth / kTotalIters;
constexpr size_t kLargeWidth = kSmallWidth + 1;
static constexpr size_t kResultBits =
std::numeric_limits<result_type>::digits; // w
static constexpr urbg_result_type kUrbgRange = RangeSize<URBG>(); // R
static constexpr size_t kUrbgBits = NumBits<URBG>(); // m
// compute the initial estimate of the bits used.
// [rand.adapt.ibits] 2 (c)
static constexpr size_t kA = // ceil(w/m)
(kResultBits / kUrbgBits) + ((kResultBits % kUrbgBits) != 0); // n'
static constexpr size_t kABits = kResultBits / kA; // w0'
static constexpr urbg_result_type kARejection =
((kUrbgRange >> kABits) << kABits); // y0'
// refine the selection to reduce the rejection frequency.
static constexpr size_t kTotalIters =
((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1); // n
// [rand.adapt.ibits] 2 (b)
static constexpr size_t kSmallIters =
kTotalIters - (kResultBits % kTotalIters); // n0
static constexpr size_t kSmallBits = kResultBits / kTotalIters; // w0
static constexpr urbg_result_type kSmallRejection =
((kUrbgRange >> kSmallBits) << kSmallBits); // y0
static constexpr size_t kLargeBits = kSmallBits + 1; // w0+1
static constexpr urbg_result_type kLargeRejection =
((kUrbgRange >> kLargeBits) << kLargeBits); // y1
//
// Because `kLargeWidth == kSmallWidth + 1`, it follows that
// Because `kLargeBits == kSmallBits + 1`, it follows that
//
// `kTotalWidth == kTotalIters * kSmallWidth + kLargeIters`
// `kResultBits == kSmallIters * kSmallBits + kLargeIters`
//
// and therefore
//
@ -224,36 +224,40 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references)
// mentioned above, if the URBG width is a divisor of `kTotalWidth`, then
// there would be no need for any large iterations (i.e., one loop would
// suffice), and indeed, in this case, `kLargeIters` would be zero.
constexpr size_t kLargeIters = kTotalWidth % kSmallWidth;
constexpr size_t kSmallIters =
(kTotalWidth - (kLargeWidth * kLargeIters)) / kSmallWidth;
static_assert(kResultBits == kSmallIters * kSmallBits +
(kTotalIters - kSmallIters) * kLargeBits,
"Error in looping constant calculations.");
static_assert(
kTotalWidth == kSmallIters * kSmallWidth + kLargeIters * kLargeWidth,
"Error in looping constant calculations.");
// The small shift is essentially small bits, but due to the potential
// of generating a smaller result_type from a larger urbg type, the actual
// shift might be 0.
static constexpr size_t kSmallShift = kSmallBits % kResultBits;
static constexpr auto kSmallMask =
MaskFromShift<urbg_result_type>(kSmallShift);
static constexpr size_t kLargeShift = kLargeBits % kResultBits;
static constexpr auto kLargeMask =
MaskFromShift<urbg_result_type>(kLargeShift);
static constexpr auto kMin = (URBG::min)();
result_type s = 0;
constexpr size_t kSmallShift = kSmallWidth % kTotalWidth;
constexpr result_type kSmallMask = MaskFromShift(result_type{kSmallShift});
for (size_t n = 0; n < kSmallIters; ++n) {
s = (s << kSmallShift) +
(static_cast<result_type>(PowerOfTwoVariate(g)) & kSmallMask);
urbg_result_type v;
do {
v = g() - kMin;
} while (v >= kSmallRejection);
s = (s << kSmallShift) + static_cast<result_type>(v & kSmallMask);
}
constexpr size_t kLargeShift = kLargeWidth % kTotalWidth;
constexpr result_type kLargeMask = MaskFromShift(result_type{kLargeShift});
for (size_t n = 0; n < kLargeIters; ++n) {
s = (s << kLargeShift) +
(static_cast<result_type>(PowerOfTwoVariate(g)) & kLargeMask);
for (size_t n = kSmallIters; n < kTotalIters; ++n) {
urbg_result_type v;
do {
v = g() - kMin;
} while (v >= kLargeRejection);
s = (s << kLargeShift) + static_cast<result_type>(v & kLargeMask);
}
static_assert(
kLargeShift == kSmallShift + 1 ||
(kLargeShift == 0 &&
kSmallShift == std::numeric_limits<result_type>::digits - 1),
"Error in looping constant calculations");
return s;
}
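As a reading aid, not part of the merge: the following standalone sketch recomputes the [rand.adapt.ibits] quantities used by the rejection loop above for std::minstd_rand, whose range of 2147483646 (m = 30 bits, per the regression test further down) feeds a 64-bit result. The names mirror the comments in the header; the snippet itself is illustrative only.

#include <cstddef>
#include <cstdint>

// w, R and m for std::minstd_rand filling a uint64_t result.
constexpr std::size_t kResultBits = 64;           // w
constexpr std::uint64_t kUrbgRange = 2147483646;  // R = RangeSize<minstd_rand>
constexpr std::size_t kUrbgBits = 30;             // m = IntegerLog2(R)

// Initial estimate, [rand.adapt.ibits] 2 (c): n' = ceil(w / m), w0' = w / n'.
constexpr std::size_t kA =
    kResultBits / kUrbgBits + (kResultBits % kUrbgBits != 0);         // n' = 3
constexpr std::size_t kABits = kResultBits / kA;                      // w0' = 21
constexpr std::uint64_t kARejection = (kUrbgRange >> kABits) << kABits;

// Refinement and the small/large split, exactly as in the header above.
constexpr std::size_t kTotalIters =
    ((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1);  // n
constexpr std::size_t kSmallIters = kTotalIters - (kResultBits % kTotalIters);
constexpr std::size_t kSmallBits = kResultBits / kTotalIters;
constexpr std::size_t kLargeBits = kSmallBits + 1;

static_assert(kTotalIters == 3 && kSmallIters == 2 && kSmallBits == 21 &&
                  kLargeBits == 22,
              "three draws per uint64_t: two of 21 bits, one of 22 bits");
static_assert(kResultBits == kSmallIters * kSmallBits +
                                 (kTotalIters - kSmallIters) * kLargeBits,
              "the split covers all 64 result bits");

int main() {}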


@ -34,8 +34,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) {
using Limits = std::numeric_limits<TypeParam>;
using FastBits = FastUniformBits<TypeParam>;
EXPECT_EQ(0, FastBits::min());
EXPECT_EQ(Limits::max(), FastBits::max());
EXPECT_EQ(0, (FastBits::min)());
EXPECT_EQ((Limits::max)(), (FastBits::max)());
constexpr int kIters = 10000;
std::random_device rd;
@ -43,8 +43,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) {
FastBits fast;
for (int i = 0; i < kIters; i++) {
const auto v = fast(gen);
EXPECT_LE(v, FastBits::max());
EXPECT_GE(v, FastBits::min());
EXPECT_LE(v, (FastBits::max)());
EXPECT_GE(v, (FastBits::min)());
}
}
@ -52,21 +52,26 @@ template <typename UIntType, UIntType Lo, UIntType Hi, UIntType Val = Lo>
struct FakeUrbg {
using result_type = UIntType;
FakeUrbg() = default;
explicit FakeUrbg(bool r) : reject(r) {}
static constexpr result_type(max)() { return Hi; }
static constexpr result_type(min)() { return Lo; }
result_type operator()() { return Val; }
};
result_type operator()() {
// when reject is set, return Hi half the time.
return ((++calls % 2) == 1 && reject) ? Hi : Val;
}
using UrngOddbits = FakeUrbg<uint8_t, 1, 0xfe, 0x73>;
using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>;
using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>;
using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>;
bool reject = false;
size_t calls = 0;
};
TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{0}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{16}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint8_t>::max)()));
@ -75,6 +80,7 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{16}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint16_t>::max)()));
@ -91,181 +97,237 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) {
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{1}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{2}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{3}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{4}));
EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{64}));
EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{17}));
EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint64_t>::max)()));
}
TEST(FastUniformBitsTest, IntegerLog2) {
EXPECT_EQ(IntegerLog2(uint16_t{0}), 0);
EXPECT_EQ(IntegerLog2(uint16_t{1}), 0);
EXPECT_EQ(IntegerLog2(uint16_t{2}), 1);
EXPECT_EQ(IntegerLog2(uint16_t{3}), 1);
EXPECT_EQ(IntegerLog2(uint16_t{4}), 2);
EXPECT_EQ(IntegerLog2(uint16_t{5}), 2);
EXPECT_EQ(IntegerLog2(std::numeric_limits<uint64_t>::max()), 63);
EXPECT_EQ(0, IntegerLog2(uint16_t{0}));
EXPECT_EQ(0, IntegerLog2(uint16_t{1}));
EXPECT_EQ(1, IntegerLog2(uint16_t{2}));
EXPECT_EQ(1, IntegerLog2(uint16_t{3}));
EXPECT_EQ(2, IntegerLog2(uint16_t{4}));
EXPECT_EQ(2, IntegerLog2(uint16_t{5}));
EXPECT_EQ(2, IntegerLog2(uint16_t{7}));
EXPECT_EQ(3, IntegerLog2(uint16_t{8}));
EXPECT_EQ(63, IntegerLog2((std::numeric_limits<uint64_t>::max)()));
}
TEST(FastUniformBitsTest, RangeSize) {
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 10>>()), 9);
EXPECT_EQ(2, (RangeSize<FakeUrbg<uint8_t, 0, 1>>()));
EXPECT_EQ(3, (RangeSize<FakeUrbg<uint8_t, 0, 2>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 0, 3>>()));
// EXPECT_EQ(0, (RangeSize<FakeUrbg<uint8_t, 2, 2>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint8_t, 2, 6>>()));
EXPECT_EQ(9, (RangeSize<FakeUrbg<uint8_t, 2, 10>>()));
EXPECT_EQ(
(RangeSize<FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()),
0);
0, (RangeSize<
FakeUrbg<uint8_t, 0, (std::numeric_limits<uint8_t>::max)()>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<
FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()),
0);
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 0, 3>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint16_t, 2, 6>>()));
EXPECT_EQ(18, (RangeSize<FakeUrbg<uint16_t, 1000, 1017>>()));
EXPECT_EQ(
0, (RangeSize<
FakeUrbg<uint16_t, 0, (std::numeric_limits<uint16_t>::max)()>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()), 0xffffffff);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()), 0xfffffffe);
EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>()), 0xfffffffd);
EXPECT_EQ((RangeSize<
FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()),
0);
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 0, 3>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint32_t, 2, 6>>()));
EXPECT_EQ(18, (RangeSize<FakeUrbg<uint32_t, 1000, 1017>>()));
EXPECT_EQ(0, (RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()));
EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()));
EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()));
EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>()));
EXPECT_EQ(
0, (RangeSize<
FakeUrbg<uint32_t, 0, (std::numeric_limits<uint32_t>::max)()>>()));
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 6>>()), 5);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 18);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()), 0x100000000ull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()), 0xffffffffull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()), 0xfffffffeull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>()), 0xfffffffdull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()), 0ull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()),
0xffffffffffffffffull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()),
0xfffffffffffffffeull);
EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffeull>>()),
0xfffffffffffffffdull);
EXPECT_EQ((RangeSize<
FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()),
0);
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 0, 3>>()));
EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 2, 5>>()));
EXPECT_EQ(5, (RangeSize<FakeUrbg<uint64_t, 2, 6>>()));
EXPECT_EQ(18, (RangeSize<FakeUrbg<uint64_t, 1000, 1017>>()));
EXPECT_EQ(0x100000000, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()));
EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()));
EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()));
EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>()));
EXPECT_EQ(0, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffff>>()));
EXPECT_EQ(0xffffffffffffffff,
(RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffff>>()));
EXPECT_EQ(0xfffffffffffffffe,
(RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffe>>()));
EXPECT_EQ(0xfffffffffffffffd,
(RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffe>>()));
EXPECT_EQ(
0, (RangeSize<
FakeUrbg<uint64_t, 0, (std::numeric_limits<uint64_t>::max)()>>()));
}
TEST(FastUniformBitsTest, PowerOfTwoSubRangeSize) {
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 10>>()), 8);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()),
0);
// The constants need to be chosen so that an infinite rejection loop doesn't
// happen...
using Urng1_5bit = FakeUrbg<uint8_t, 0, 2, 0>; // ~1.5 bits (range 3)
using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>;
using Urng22bits = FakeUrbg<uint32_t, 0, 0x3fffff, 0x301020>;
using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>; // ~31.9 bits
using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>;
using Urng33bits =
FakeUrbg<uint64_t, 1, 0x1ffffffff, 0x013301033>; // ~32.9 bits
using Urng63bits = FakeUrbg<uint64_t, 1, 0xfffffffffffffffe,
0xfedcba9012345678>; // ~63.9 bits
using Urng64bits =
FakeUrbg<uint64_t, 0, 0xffffffffffffffff, 0x123456780fedcba9>;
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()),
0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()),
0x80000000);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()),
0x80000000);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()),
0);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 6>>()), 4);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 16);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()),
0x100000000ull);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()),
0x80000000ull);
EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()),
0x80000000ull);
EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()),
0);
EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()),
0x8000000000000000ull);
EXPECT_EQ(
(PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()),
0x8000000000000000ull);
EXPECT_EQ((PowerOfTwoSubRangeSize<
FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()),
0);
}
TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
TEST(FastUniformBitsTest, OutputsUpTo32Bits) {
// Tests how values are composed; the single-bit deltas should be spread
// across each invocation.
Urng1_5bit urng1_5;
Urng4bits urng4;
Urng22bits urng22;
Urng31bits urng31;
Urng32bits urng32;
Urng33bits urng33;
Urng63bits urng63;
Urng64bits urng64;
// 8-bit types
{
FastUniformBits<uint8_t> fast8;
EXPECT_EQ(0x0, fast8(urng1_5));
EXPECT_EQ(0x11, fast8(urng4));
EXPECT_EQ(0x20, fast8(urng22));
EXPECT_EQ(0x2, fast8(urng31));
EXPECT_EQ(0x1, fast8(urng32));
EXPECT_EQ(0x32, fast8(urng33));
EXPECT_EQ(0x77, fast8(urng63));
EXPECT_EQ(0xa9, fast8(urng64));
}
// 16-bit types
{
FastUniformBits<uint16_t> fast16;
EXPECT_EQ(0x0, fast16(urng1_5));
EXPECT_EQ(0x1111, fast16(urng4));
EXPECT_EQ(0xf02, fast16(urng31));
EXPECT_EQ(0xf01, fast16(urng32));
EXPECT_EQ(0x1020, fast16(urng22));
EXPECT_EQ(0x0f02, fast16(urng31));
EXPECT_EQ(0x0f01, fast16(urng32));
EXPECT_EQ(0x1032, fast16(urng33));
EXPECT_EQ(0x5677, fast16(urng63));
EXPECT_EQ(0xcba9, fast16(urng64));
}
// 32-bit types
{
FastUniformBits<uint32_t> fast32;
EXPECT_EQ(0x0, fast32(urng1_5));
EXPECT_EQ(0x11111111, fast32(urng4));
EXPECT_EQ(0x08301020, fast32(urng22));
EXPECT_EQ(0x0f020f02, fast32(urng31));
EXPECT_EQ(0x74010f01, fast32(urng32));
EXPECT_EQ(0x13301032, fast32(urng33));
EXPECT_EQ(0x12345677, fast32(urng63));
EXPECT_EQ(0x0fedcba9, fast32(urng64));
}
}
TEST(FastUniformBitsTest, Outputs64Bits) {
// Tests how values are composed; the single-bit deltas should be spread
// across each invocation.
FastUniformBits<uint64_t> fast64;
{
FakeUrbg<uint8_t, 0, 1, 0> urng0;
FakeUrbg<uint8_t, 0, 1, 1> urng1;
Urng4bits urng4;
Urng22bits urng22;
Urng31bits urng31;
Urng32bits urng32;
Urng33bits urng33;
Urng63bits urng63;
Urng64bits urng64;
// somewhat degenerate cases only create a single bit.
EXPECT_EQ(0x0, fast64(urng0));
EXPECT_EQ(64, urng0.calls);
EXPECT_EQ(0xffffffffffffffff, fast64(urng1));
EXPECT_EQ(64, urng1.calls);
// less degenerate cases.
EXPECT_EQ(0x1111111111111111, fast64(urng4));
EXPECT_EQ(16, urng4.calls);
EXPECT_EQ(0x01020c0408301020, fast64(urng22));
EXPECT_EQ(3, urng22.calls);
EXPECT_EQ(0x387811c3c0870f02, fast64(urng31));
EXPECT_EQ(3, urng31.calls);
EXPECT_EQ(0x74010f0174010f01, fast64(urng32));
EXPECT_EQ(2, urng32.calls);
EXPECT_EQ(0x808194040cb01032, fast64(urng33));
EXPECT_EQ(3, urng33.calls);
EXPECT_EQ(0x1234567712345677, fast64(urng63));
EXPECT_EQ(2, urng63.calls);
EXPECT_EQ(0x123456780fedcba9, fast64(urng64));
EXPECT_EQ(1, urng64.calls);
}
// 64-bit types
// The 1.5 bit case is somewhat interesting in that the algorithm refinement
// causes one extra small sample. Comments here reference the names used in
// [rand.adapt.ibits] that correspond to this case.
{
FastUniformBits<uint64_t> fast64;
EXPECT_EQ(0x1111111111111111, fast64(urng4));
Urng1_5bit urng1_5;
// w = 64
// R = 3
// m = 1
// n' = 64
// w0' = 1
// y0' = 2
// n = (1 <= 0) ? 64 : 65 = 65
// n0 = 65 - (64%65) = 1
// n1 = 64
// w0 = 0
// y0 = 3
// w1 = 1
// y1 = 2
EXPECT_EQ(0x0, fast64(urng1_5));
EXPECT_EQ(65, urng1_5.calls);
}
// Validate rejections for non-power-of-2 cases.
{
Urng1_5bit urng1_5(true);
Urng31bits urng31(true);
Urng33bits urng33(true);
Urng63bits urng63(true);
// For 1.5 bits, there would be 1+2*64, except the first
// value was accepted and shifted off the end.
EXPECT_EQ(0, fast64(urng1_5));
EXPECT_EQ(128, urng1_5.calls);
EXPECT_EQ(0x387811c3c0870f02, fast64(urng31));
EXPECT_EQ(0x74010f0174010f01, fast64(urng32));
EXPECT_EQ(6, urng31.calls);
EXPECT_EQ(0x808194040cb01032, fast64(urng33));
EXPECT_EQ(6, urng33.calls);
EXPECT_EQ(0x1234567712345677, fast64(urng63));
EXPECT_EQ(4, urng63.calls);
}
}
TEST(FastUniformBitsTest, URBG32bitRegression) {
// Validate with deterministic 32-bit std::minstd_rand
// to ensure that operator() performs as expected.
EXPECT_EQ(2147483646, RangeSize<std::minstd_rand>());
EXPECT_EQ(30, IntegerLog2(RangeSize<std::minstd_rand>()));
std::minstd_rand gen(1);
FastUniformBits<uint64_t> fast64;
EXPECT_EQ(0x05e47095f847c122ull, fast64(gen));
EXPECT_EQ(0x8f82c1ba30b64d22ull, fast64(gen));
EXPECT_EQ(0x3b971a3558155039ull, fast64(gen));
EXPECT_EQ(0x05e47095f8791f45, fast64(gen));
EXPECT_EQ(0x028be17e3c07c122, fast64(gen));
EXPECT_EQ(0x55d2847c1626e8c2, fast64(gen));
}
} // namespace
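A minimal usage sketch in the spirit of the tests above, not part of the merge: a 4-bit generator whose range 0x10 - 1 + 1 = 16 is a power of two, so FastUniformBits<uint64_t> takes the simplified loop and concatenates sixteen 4-bit draws, matching the Urng4bits expectations earlier in this file. It assumes the usual absl/random/internal/fast_uniform_bits.h include path and absl::random_internal namespace of the header being modified here.

#include <cassert>
#include <cstddef>
#include <cstdint>

#include "absl/random/internal/fast_uniform_bits.h"

// A tiny URBG in the style of FakeUrbg: range [1, 0x10], always yields 2.
struct TinyUrng4 {
  using result_type = uint8_t;
  static constexpr result_type(min)() { return 1; }
  static constexpr result_type(max)() { return 0x10; }
  result_type operator()() {
    ++calls;
    return 2;  // each draw contributes the 4-bit pattern 0b0001 after - min()
  }
  size_t calls = 0;
};

int main() {
  absl::random_internal::FastUniformBits<uint64_t> fast64;
  TinyUrng4 urng;
  const uint64_t v = fast64(urng);
  assert(v == 0x1111111111111111ull);  // sixteen nibbles of 0x1
  assert(urng.calls == 16);            // 64 bits / 4 bits per draw
  return 0;
}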


@ -111,12 +111,9 @@ void TableGenerator::Print(std::ostream* os) {
"\n"
"#include \"absl/random/gaussian_distribution.h\"\n"
"\n"
// "namespace " and "absl" are broken apart so as not to conflict with
// script that adds the LTS inline namespace.
"namespace "
"absl {\n"
"namespace "
"random_internal {\n"
"namespace absl {\n"
"ABSL_NAMESPACE_BEGIN\n"
"namespace random_internal {\n"
"\n"
"const gaussian_distribution_base::Tables\n"
" gaussian_distribution_base::zg_ = {\n";
@ -125,10 +122,9 @@ void TableGenerator::Print(std::ostream* os) {
FormatArrayContents(os, tables_.f);
*os << "};\n"
"\n"
"} // namespace "
"random_internal\n"
"} // namespace "
"absl\n"
"} // namespace random_internal\n"
"ABSL_NAMESPACE_END\n"
"} // namespace absl\n"
"\n"
"// clang-format on\n"
"// END GENERATED CODE";


@ -419,8 +419,8 @@ TEST(GenerateRealTest, ExhaustiveFloat) {
};
// Rely on RandU64ToFloat generating values from greatest to least when
// supplied with uint64_t values from greatest (0xfff...) to least (0x0). Thus,
// this algorithm stores the previous value, and if the new value is at
// supplied with uint64_t values from greatest (0xfff...) to least (0x0).
// Thus, this algorithm stores the previous value, and if the new value is at
// greater than or equal to the previous value, then there is a collision in
// the generation algorithm.
//


@ -1,13 +1,13 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the"License");
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an"AS IS" BASIS,
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.


@ -150,6 +150,7 @@ struct alignas(16) u64x2 {
#include <altivec.h>
// <altivec.h> #defines vector __vector; in C++, this is bad form.
#undef vector
#undef bool
// Rely on the PowerPC AltiVec vector operations for accelerated AES
// instructions. GCC support of the PPC vector types is described in:


@ -105,7 +105,7 @@ typename absl::enable_if_t<
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
uniform_lower_bound(Tag, IntType a, IntType) {
return a + 1;
return a < (std::numeric_limits<IntType>::max)() ? (a + 1) : a;
}
template <typename FloatType, typename Tag>
@ -136,7 +136,7 @@ typename absl::enable_if_t<
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
uniform_upper_bound(Tag, IntType, IntType b) {
return b - 1;
return b > (std::numeric_limits<IntType>::min)() ? (b - 1) : b;
}
template <typename FloatType, typename Tag>
@ -172,6 +172,40 @@ uniform_upper_bound(Tag, FloatType, FloatType b) {
return std::nextafter(b, (std::numeric_limits<FloatType>::max)());
}
// Returns whether the bounds are valid for the underlying distribution.
// Inputs must have already been resolved via uniform_*_bound calls.
//
// The c++ standard constraints in [rand.dist.uni.int] are listed as:
// requires: lo <= hi.
//
// In the uniform_int_distribution, {lo, hi} are closed, closed. Thus:
// [0, 0] is legal.
// [0, 0) is not legal, but [0, 1) is, which translates to [0, 0].
// (0, 1) is not legal, but (0, 2) is, which translates to [1, 1].
// (0, 0] is not legal, but (0, 1] is, which translates to [1, 1].
//
// The c++ standard constraints in [rand.dist.uni.real] are listed as:
// requires: lo <= hi.
// requires: (hi - lo) <= numeric_limits<T>::max()
//
// In the uniform_real_distribution, {lo, hi} are closed, open. Thus:
// [0, 0] is legal, which is [0, 0+epsilon).
// [0, 0) is legal.
// (0, 0) is not legal, but (0-epsilon, 0+epsilon) is.
// (0, 0] is not legal, but (0, 0+epsilon] is.
//
template <typename FloatType>
absl::enable_if_t<std::is_floating_point<FloatType>::value, bool>
is_uniform_range_valid(FloatType a, FloatType b) {
return a <= b && std::isfinite(b - a);
}
template <typename IntType>
absl::enable_if_t<std::is_integral<IntType>::value, bool>
is_uniform_range_valid(IntType a, IntType b) {
return a <= b;
}
// UniformDistribution selects either absl::uniform_int_distribution
// or absl::uniform_real_distribution depending on the NumType parameter.
template <typename NumType>
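To show the effect of the saturating bounds and the new is_uniform_range_valid() helper added above, here is a short sketch, not part of the merge. It assumes the usual absl/random/internal/uniform_helper.h include path and that is_uniform_range_valid lives in absl::random_internal alongside the bound helpers; the expected values mirror the new test file below.

#include <cassert>
#include <cstdint>
#include <limits>

#include "absl/random/internal/uniform_helper.h"

int main() {
  using absl::IntervalOpenOpenTag;
  using absl::random_internal::is_uniform_range_valid;
  using absl::random_internal::uniform_lower_bound;
  using absl::random_internal::uniform_upper_bound;

  constexpr IntervalOpenOpenTag kOpenOpen;
  constexpr int64_t kMax = (std::numeric_limits<int64_t>::max)();
  constexpr int64_t kMin = (std::numeric_limits<int64_t>::min)();

  // Ordinary case: the open-open interval (0, 0) resolves to lo = 1, hi = -1.
  assert(uniform_lower_bound(kOpenOpen, int64_t{0}, int64_t{0}) == 1);
  assert(uniform_upper_bound(kOpenOpen, int64_t{0}, int64_t{0}) == -1);

  // At the ends of the representable range the new guards saturate instead
  // of overflowing: no kMax + 1, no kMin - 1.
  assert(uniform_lower_bound(kOpenOpen, kMax, kMax) == kMax);
  assert(uniform_upper_bound(kOpenOpen, kMin, kMin) == kMin);

  // Resolved bounds can then be validated before constructing a distribution.
  assert(is_uniform_range_valid(int64_t{1}, kMax));
  assert(!is_uniform_range_valid(kMax, int64_t{1}));
  return 0;
}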


@ -0,0 +1,279 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/random/internal/uniform_helper.h"
#include <cmath>
#include <cstdint>
#include <random>
#include "gtest/gtest.h"
namespace {
using absl::IntervalClosedClosedTag;
using absl::IntervalClosedOpenTag;
using absl::IntervalOpenClosedTag;
using absl::IntervalOpenOpenTag;
using absl::random_internal::uniform_inferred_return_t;
using absl::random_internal::uniform_lower_bound;
using absl::random_internal::uniform_upper_bound;
class UniformHelperTest : public testing::Test {};
TEST_F(UniformHelperTest, UniformBoundFunctionsGeneral) {
constexpr IntervalClosedClosedTag IntervalClosedClosed;
constexpr IntervalClosedOpenTag IntervalClosedOpen;
constexpr IntervalOpenClosedTag IntervalOpenClosed;
constexpr IntervalOpenOpenTag IntervalOpenOpen;
// absl::uniform_int_distribution natively assumes IntervalClosedClosed
// absl::uniform_real_distribution natively assumes IntervalClosedOpen
EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, 0, 100), 1);
EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, 0, 100), 1);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 100), 0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 100), 0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 100), 99);
EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 100), 99);
EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, 0, 100), 100);
EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, 0, 100), 100);
EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, 0, 1.0), 1.0);
// Negative value tests
EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, -100, -1), -99);
EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, -100, -1), -99);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -100, -1), -100);
EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -100, -1), -100);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, -2.0, -1.0),
-2.0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -100, -1), -2);
EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -100, -1), -2);
EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, -100, -1), -1);
EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, -100, -1), -1);
EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, -2.0, -1.0),
-1.0);
EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 1.0, 2.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, +0.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -0.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0), 1.0);
}
TEST_F(UniformHelperTest, UniformBoundFunctionsIntBounds) {
// Verifies the saturating nature of uniform_lower_bound and
// uniform_upper_bound
constexpr IntervalOpenOpenTag IntervalOpenOpen;
// uint max.
constexpr auto m = (std::numeric_limits<uint64_t>::max)();
EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0u, 0u));
EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m, m));
EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m - 1, m - 1));
EXPECT_EQ(0, uniform_upper_bound(IntervalOpenOpen, 0u, 0u));
EXPECT_EQ(m - 1, uniform_upper_bound(IntervalOpenOpen, m, m));
// int min/max
constexpr auto l = (std::numeric_limits<int64_t>::min)();
constexpr auto r = (std::numeric_limits<int64_t>::max)();
EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0, 0));
EXPECT_EQ(l + 1, uniform_lower_bound(IntervalOpenOpen, l, l));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r - 1, r - 1));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r, r));
EXPECT_EQ(-1, uniform_upper_bound(IntervalOpenOpen, 0, 0));
EXPECT_EQ(l, uniform_upper_bound(IntervalOpenOpen, l, l));
EXPECT_EQ(r - 1, uniform_upper_bound(IntervalOpenOpen, r, r));
}
TEST_F(UniformHelperTest, UniformBoundFunctionsRealBounds) {
// absl::uniform_real_distribution natively assumes IntervalClosedOpen;
// use the inverse here so each bound has to change.
constexpr IntervalOpenClosedTag IntervalOpenClosed;
// Edge cases: the next value toward itself is itself.
EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, 1.0, 1.0));
EXPECT_EQ(1.0f, uniform_lower_bound(IntervalOpenClosed, 1.0f, 1.0f));
// rightmost and leftmost finite values.
constexpr auto r = (std::numeric_limits<double>::max)();
const auto re = std::nexttoward(r, 0.0);
constexpr auto l = -r;
const auto le = std::nexttoward(l, 0.0);
EXPECT_EQ(l, uniform_lower_bound(IntervalOpenClosed, l, l)); // (l,l)
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, r, r)); // (r,r)
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, r)); // (l,r)
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, 0.0)); // (l, 0)
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, le)); // (l, le)
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, re, r)); // (re, r)
EXPECT_EQ(le, uniform_upper_bound(IntervalOpenClosed, l, l)); // (l,l)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, r, r)); // (r,r)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, r)); // (l,r)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, re)); // (l,re)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, 0.0, r)); // (0, r)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, re, r)); // (re, r)
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, le, re)); // (le, re)
const double e = std::nextafter(1.0, 2.0); // 1 + epsilon
const double f = std::nextafter(1.0, 0.0); // 1 - epsilon
// (1.0, 1.0 + epsilon)
EXPECT_EQ(e, uniform_lower_bound(IntervalOpenClosed, 1.0, e));
EXPECT_EQ(std::nextafter(e, 2.0),
uniform_upper_bound(IntervalOpenClosed, 1.0, e));
// (1.0-epsilon, 1.0)
EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, f, 1.0));
EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, f, 1.0));
// denorm cases.
const double g = std::numeric_limits<double>::denorm_min();
const double h = std::nextafter(g, 1.0);
// (0, denorm_min)
EXPECT_EQ(g, uniform_lower_bound(IntervalOpenClosed, 0.0, g));
EXPECT_EQ(h, uniform_upper_bound(IntervalOpenClosed, 0.0, g));
// (denorm_min, 1.0)
EXPECT_EQ(h, uniform_lower_bound(IntervalOpenClosed, g, 1.0));
EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, g, 1.0));
// Edge cases: invalid bounds.
EXPECT_EQ(f, uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0));
}
struct Invalid {};
template <typename A, typename B>
auto InferredUniformReturnT(int) -> uniform_inferred_return_t<A, B>;
template <typename, typename>
Invalid InferredUniformReturnT(...);
// Given types <A, B, Expect>, CheckArgsInferType() verifies that
//
// uniform_inferred_return_t<A, B> and
// uniform_inferred_return_t<B, A>
//
// return the type "Expect".
//
// This interface can also be used to assert that given inferred return types
// are invalid. Writing:
//
// CheckArgsInferType<float, int, Invalid>()
//
// will assert that this overload does not exist.
template <typename A, typename B, typename Expect>
void CheckArgsInferType() {
static_assert(
absl::conjunction<
std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>,
std::is_same<Expect,
decltype(InferredUniformReturnT<B, A>(0))>>::value,
"");
}
TEST_F(UniformHelperTest, UniformTypeInference) {
// Infers common types.
CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
CheckArgsInferType<uint32_t, uint32_t, uint32_t>();
CheckArgsInferType<uint64_t, uint64_t, uint64_t>();
CheckArgsInferType<int16_t, int16_t, int16_t>();
CheckArgsInferType<int32_t, int32_t, int32_t>();
CheckArgsInferType<int64_t, int64_t, int64_t>();
CheckArgsInferType<float, float, float>();
CheckArgsInferType<double, double, double>();
// Properly promotes uint16_t.
CheckArgsInferType<uint16_t, uint32_t, uint32_t>();
CheckArgsInferType<uint16_t, uint64_t, uint64_t>();
CheckArgsInferType<uint16_t, int32_t, int32_t>();
CheckArgsInferType<uint16_t, int64_t, int64_t>();
CheckArgsInferType<uint16_t, float, float>();
CheckArgsInferType<uint16_t, double, double>();
// Properly promotes int16_t.
CheckArgsInferType<int16_t, int32_t, int32_t>();
CheckArgsInferType<int16_t, int64_t, int64_t>();
CheckArgsInferType<int16_t, float, float>();
CheckArgsInferType<int16_t, double, double>();
// Invalid (u)int16_t-pairings do not compile.
// See "CheckArgsInferType" comments above, for how this is achieved.
CheckArgsInferType<uint16_t, int16_t, Invalid>();
CheckArgsInferType<int16_t, uint32_t, Invalid>();
CheckArgsInferType<int16_t, uint64_t, Invalid>();
// Properly promotes uint32_t.
CheckArgsInferType<uint32_t, uint64_t, uint64_t>();
CheckArgsInferType<uint32_t, int64_t, int64_t>();
CheckArgsInferType<uint32_t, double, double>();
// Properly promotes int32_t.
CheckArgsInferType<int32_t, int64_t, int64_t>();
CheckArgsInferType<int32_t, double, double>();
// Invalid (u)int32_t-pairings do not compile.
CheckArgsInferType<uint32_t, int32_t, Invalid>();
CheckArgsInferType<int32_t, uint64_t, Invalid>();
CheckArgsInferType<int32_t, float, Invalid>();
CheckArgsInferType<uint32_t, float, Invalid>();
// Invalid (u)int64_t-pairings do not compile.
CheckArgsInferType<uint64_t, int64_t, Invalid>();
CheckArgsInferType<int64_t, float, Invalid>();
CheckArgsInferType<int64_t, double, Invalid>();
// Properly promotes float.
CheckArgsInferType<float, double, double>();
}
} // namespace
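The CheckArgsInferType() helper above relies on a standard overload-resolution detection idiom. A minimal, self-contained sketch of the same trick, with hypothetical names and no Abseil dependency:

#include <type_traits>
#include <utility>

struct NotDetected {};

// If A + B is well-formed, the int overload participates and its return type
// is the type of that expression; otherwise SFINAE removes it and the
// variadic fallback yields NotDetected.
template <typename A, typename B>
auto DetectSum(int) -> decltype(std::declval<A>() + std::declval<B>());

template <typename, typename>
NotDetected DetectSum(...);

// Passing 0 prefers the int overload whenever it exists, exactly as
// InferredUniformReturnT(0) does in the test above.
static_assert(std::is_same<decltype(DetectSum<int, long>(0)), long>::value,
              "int + long is well-formed and yields long");
static_assert(
    std::is_same<decltype(DetectSum<int, NotDetected>(0)), NotDetected>::value,
    "int + NotDetected does not compile, so the fallback is selected");

int main() {}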