Export of internal Abseil changes.

--
9c4ef32276054fba6a116c01cd4b3fd278f59ece by Andy Soffer <asoffer@google.com>:

Remove support for unused arbitrary-width output in FastUniformBits. Width
should be inferred from the requested return UIntType.
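
A minimal before/after sketch of the call-site change (`Draw32` and the
choice of engine are illustrative, not from the Abseil sources):

    #include <cstdint>
    #include <random>
    #include "absl/random/internal/fast_uniform_bits.h"

    uint32_t Draw32() {
      // Previously an explicit width could be requested, e.g.
      //   absl::random_internal::FastUniformBits<uint32_t, 32> bits;
      // Now the width is always the full width of the result type.
      absl::random_internal::FastUniformBits<uint32_t> bits;
      std::mt19937 gen;
      return bits(gen);  // yields 32 uniformly distributed bits
    }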

PiperOrigin-RevId: 257189319

--
e3326329d02171a301cc3d6ae617ed448472b728 by Abseil Team <absl-team@google.com>:

Update comments to make clear that absl::Format(std::string *, ...) appends to the provided string.
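
A short sketch of the documented behavior (`Describe` is an illustrative
helper, not part of the library):

    #include <string>
    #include "absl/strings/str_format.h"

    std::string Describe() {
      std::string out = "value: ";
      absl::Format(&out, "%d", 42);
      return out;  // "value: 42" -- the formatted text was appended
    }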

PiperOrigin-RevId: 257058043

--
e2096b06d714fba3ea2c885d670a42efd872765c by Xiaoyi Zhang <zhangxy@google.com>:

Fix a compilation error on MSVC 2017. The root cause appears to be a compiler bug
in VS 2017 involving pack expansion with multiple parameter packs; specifically, `MakeVisitationMatrixImpl::Run` triggers compiler error "error C3528: 'BoundIndices': the number of elements in this pack expansion does not match the number of elements in 'EndIndices'".
Work around this issue by expanding only one parameter pack, `CurrIndices`, in `MakeVisitationMatrixImpl::Run`.
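
Reduced to a toy sketch (the names below are illustrative, not the actual
Abseil identifiers): bundling the already-bound indices into a single
index_sequence type means each expansion site names only one pack.

    #include <cstddef>
    #include <utility>

    template <typename Is, std::size_t J>
    struct Append;

    // Single-pack partial specialization: `typename Append<Bound, Curr>::type`
    // expanded over `Curr` alone avoids the multi-pack expansion that
    // VS 2017 miscompiles.
    template <std::size_t... Is, std::size_t J>
    struct Append<std::index_sequence<Is...>, J> {
      using type = std::index_sequence<Is..., J>;
    };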

PiperOrigin-RevId: 257040381

--
9ab75ff27b2513583fffc1233e6568aa96be36f7 by Matt Calabrese <calabrese@google.com>:

Internal change.

PiperOrigin-RevId: 257039041
GitOrigin-RevId: 9c4ef32276054fba6a116c01cd4b3fd278f59ece
Change-Id: I5f708bb03aff93948502394a413260af2a8a273b


@@ -257,9 +257,9 @@ void BM_Thread(benchmark::State& state) {
   BENCHMARK_TEMPLATE(BM_ShuffleReuse, Engine, 100);                            \
   BENCHMARK_TEMPLATE(BM_ShuffleReuse, Engine, 1000);                           \
   BENCHMARK_TEMPLATE(BM_Dist, Engine,                                          \
-                     absl::random_internal::FastUniformBits<uint32_t, 32>);    \
+                     absl::random_internal::FastUniformBits<uint32_t>);        \
   BENCHMARK_TEMPLATE(BM_Dist, Engine,                                          \
-                     absl::random_internal::FastUniformBits<uint64_t, 64>);    \
+                     absl::random_internal::FastUniformBits<uint64_t>);        \
   BENCHMARK_TEMPLATE(BM_Dist, Engine, std::uniform_int_distribution<int32_t>); \
   BENCHMARK_TEMPLATE(BM_Dist, Engine, std::uniform_int_distribution<int64_t>); \
   BENCHMARK_TEMPLATE(BM_Dist, Engine,                                          \


@@ -38,14 +38,12 @@ constexpr typename URBG::result_type constexpr_range() {
 // from a type which conforms to the [rand.req.urbg] concept.
 // Parameterized by:
 //  `UIntType`: the result (output) type
-//  `Width`: binary output width
 //
 // The std::independent_bits_engine [rand.adapt.ibits] adaptor can be
 // instantiated from an existing generator through a copy or a move. It does
 // not, however, facilitate the production of pseudorandom bits from an un-owned
 // generator that will outlive the std::independent_bits_engine instance.
-template <typename UIntType = uint64_t,
-          size_t Width = std::numeric_limits<UIntType>::digits>
+template <typename UIntType = uint64_t>
 class FastUniformBits {
   static_assert(std::is_unsigned<UIntType>::value,
                 "Class-template FastUniformBits<> must be parameterized using "
@@ -53,29 +51,14 @@ class FastUniformBits {
 
   // `kWidth` is the width, in binary digits, of the output. By default it is
   // the number of binary digits in the `result_type`.
-  static constexpr size_t kWidth = Width;
-  static_assert(kWidth > 0,
-                "Class-template FastUniformBits<> Width argument must be > 0");
-
-  static_assert(kWidth <= std::numeric_limits<UIntType>::digits,
-                "Class-template FastUniformBits<> Width argument must be <= "
-                "width of UIntType.");
-
-  static constexpr bool kIsMaxWidth =
-      (kWidth >= std::numeric_limits<UIntType>::digits);
-
-  // Computes a mask of `n` bits for the `UIntType`.
-  static constexpr UIntType constexpr_mask(size_t n) {
-    return (UIntType(1) << n) - 1;
-  }
+  static constexpr size_t kWidth = std::numeric_limits<UIntType>::digits;
 
  public:
   using result_type = UIntType;
 
   static constexpr result_type(min)() { return 0; }
   static constexpr result_type(max)() {
-    return kIsMaxWidth ? (std::numeric_limits<result_type>::max)()
-                       : constexpr_mask(kWidth);
+    return (std::numeric_limits<result_type>::max)();
   }
 
   template <typename URBG>
template <typename URBG>
@@ -166,7 +149,6 @@ class FastUniformBitsURBGConstants {
 // URBG::result_type values are combined into an output_value.
 // Parameterized by the FastUniformBits parameters:
 //   `UIntType`: output type.
-//   `Width`: binary output width,
 //   `URNG`: The underlying UniformRandomNumberGenerator.
 //
 // The looping constants describe the sets of loop counters and mask values
@@ -177,10 +159,10 @@ class FastUniformBitsURBGConstants {
 // bit per variate.
 //
 // See [rand.adapt.ibits] for more details on the use of these constants.
-template <typename UIntType, size_t Width, typename URBG>
+template <typename UIntType, typename URBG>
 class FastUniformBitsLoopingConstants {
  private:
-  static constexpr size_t kWidth = Width;
+  static constexpr size_t kWidth = std::numeric_limits<UIntType>::digits;
   using urbg_result_type = typename URBG::result_type;
   using uint_result_type = UIntType;
 
@@ -229,19 +211,19 @@ class FastUniformBitsLoopingConstants {
                 "Class-template FastUniformBitsLoopingConstants::kW0 too small.");
 };
 
-template <typename UIntType, size_t Width>
+template <typename UIntType>
 template <typename URBG>
-typename FastUniformBits<UIntType, Width>::result_type
-FastUniformBits<UIntType, Width>::operator()(
+typename FastUniformBits<UIntType>::result_type
+FastUniformBits<UIntType>::operator()(
     URBG& g) {  // NOLINT(runtime/references)
   using constants = FastUniformBitsURBGConstants<URBG>;
   return Generate(
       g, std::integral_constant<bool, constants::kRangeMask >= (max)()>{});
 }
 
-template <typename UIntType, size_t Width>
+template <typename UIntType>
 template <typename URBG>
-typename URBG::result_type FastUniformBits<UIntType, Width>::Variate(
+typename URBG::result_type FastUniformBits<UIntType>::Variate(
     URBG& g) {  // NOLINT(runtime/references)
   using constants = FastUniformBitsURBGConstants<URBG>;
   if (constants::kPowerOfTwo) {
@@ -256,10 +238,10 @@ typename URBG::result_type FastUniformBits<UIntType, Width>::Variate(
   return u;
 }
 
-template <typename UIntType, size_t Width>
+template <typename UIntType>
 template <typename URBG>
-typename FastUniformBits<UIntType, Width>::result_type
-FastUniformBits<UIntType, Width>::Generate(
+typename FastUniformBits<UIntType>::result_type
+FastUniformBits<UIntType>::Generate(
     URBG& g,  // NOLINT(runtime/references)
     std::true_type /* avoid_looping */) {
   // The width of the result_type is less than the width of the random bits
@@ -268,10 +250,10 @@ FastUniformBits<UIntType, Width>::Generate(
   return Variate(g) & (max)();
 }
 
-template <typename UIntType, size_t Width>
+template <typename UIntType>
 template <typename URBG>
-typename FastUniformBits<UIntType, Width>::result_type
-FastUniformBits<UIntType, Width>::Generate(
+typename FastUniformBits<UIntType>::result_type
+FastUniformBits<UIntType>::Generate(
     URBG& g,  // NOLINT(runtime/references)
     std::false_type /* avoid_looping */) {
   // The width of the result_type is wider than the number of random bits
@@ -279,7 +261,7 @@ FastUniformBits<UIntType, Width>::Generate(
   // using a shift and mask. The constants type generates the parameters used
   // to ensure that the bits are distributed across all the invocations of the
   // underlying URNG.
-  using constants = FastUniformBitsLoopingConstants<UIntType, Width, URBG>;
+  using constants = FastUniformBitsLoopingConstants<UIntType, URBG>;
   result_type s = 0;
   for (size_t n = 0; n < constants::kN0; ++n) {

@@ -45,57 +45,6 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) {
   }
 }
 
-TEST(FastUniformBitsTest, TypeBoundaries32) {
-  // Tests that FastUniformBits can adapt to 32-bit boundaries.
-  absl::random_internal::FastUniformBits<uint32_t, 1> a;
-  absl::random_internal::FastUniformBits<uint32_t, 31> b;
-  absl::random_internal::FastUniformBits<uint32_t, 32> c;
-
-  {
-    std::mt19937 gen;  // 32-bit
-    a(gen);
-    b(gen);
-    c(gen);
-  }
-
-  {
-    std::mt19937_64 gen;  // 64-bit
-    a(gen);
-    b(gen);
-    c(gen);
-  }
-}
-
-TEST(FastUniformBitsTest, TypeBoundaries64) {
-  // Tests that FastUniformBits can adapt to 64-bit boundaries.
-  absl::random_internal::FastUniformBits<uint64_t, 1> a;
-  absl::random_internal::FastUniformBits<uint64_t, 31> b;
-  absl::random_internal::FastUniformBits<uint64_t, 32> c;
-  absl::random_internal::FastUniformBits<uint64_t, 33> d;
-  absl::random_internal::FastUniformBits<uint64_t, 63> e;
-  absl::random_internal::FastUniformBits<uint64_t, 64> f;
-
-  {
-    std::mt19937 gen;  // 32-bit
-    a(gen);
-    b(gen);
-    c(gen);
-    d(gen);
-    e(gen);
-    f(gen);
-  }
-
-  {
-    std::mt19937_64 gen;  // 64-bit
-    a(gen);
-    b(gen);
-    c(gen);
-    d(gen);
-    e(gen);
-    f(gen);
-  }
-}
-
 class UrngOddbits {
  public:
   using result_type = uint8_t;
@@ -135,18 +84,6 @@ TEST(FastUniformBitsTest, FastUniformBitsDetails) {
     static_assert(constants::kRangeMask == 0x0f,
                   "constants::kRangeMask == false");
   }
-  {
-    using looping = FastUniformBitsLoopingConstants<uint32_t, 31, Urng4bits>;
-    // To get 31 bits from a 4-bit generator, issue 8 calls and extract 4 bits
-    // per call on all except the first.
-    static_assert(looping::kN0 == 1, "looping::kN0");
-    static_assert(looping::kW0 == 3, "looping::kW0");
-    static_assert(looping::kM0 == 0x7, "looping::kM0");
-    // (The second set of calls, kN1, will not do anything.)
-    static_assert(looping::kN1 == 8, "looping::kN1");
-    static_assert(looping::kW1 == 4, "looping::kW1");
-    static_assert(looping::kM1 == 0xf, "looping::kM1");
-  }
 
   // ~7-bit URBG
   {
@@ -158,31 +95,6 @@ TEST(FastUniformBitsTest, FastUniformBitsDetails) {
     static_assert(constants::kRangeMask == 0x7f,
                   "constants::kRangeMask == 0x7f");
   }
-  {
-    using looping = FastUniformBitsLoopingConstants<uint64_t, 60, UrngOddbits>;
-    // To get 60 bits from a 7-bit generator, issue 10 calls and extract 6 bits
-    // per call, discarding the excess entropy.
-    static_assert(looping::kN0 == 10, "looping::kN0");
-    static_assert(looping::kW0 == 6, "looping::kW0");
-    static_assert(looping::kM0 == 0x3f, "looping::kM0");
-    // (The second set of calls, kN1, will not do anything.)
-    static_assert(looping::kN1 == 10, "looping::kN1");
-    static_assert(looping::kW1 == 7, "looping::kW1");
-    static_assert(looping::kM1 == 0x7f, "looping::kM1");
-  }
-  {
-    using looping = FastUniformBitsLoopingConstants<uint64_t, 63, UrngOddbits>;
-    // To get 63 bits from a 7-bit generator, issue 10 calls--the same as we
-    // would issue for 60 bits--however this time we use two groups. The first
-    // group (kN0) will issue 7 calls, extracting 6 bits per call.
-    static_assert(looping::kN0 == 7, "looping::kN0");
-    static_assert(looping::kW0 == 6, "looping::kW0");
-    static_assert(looping::kM0 == 0x3f, "looping::kM0");
-    // The second group (kN1) will issue 3 calls, extracting 7 bits per call.
-    static_assert(looping::kN1 == 10, "looping::kN1");
-    static_assert(looping::kW1 == 7, "looping::kW1");
-    static_assert(looping::kM1 == 0x7f, "looping::kM1");
-  }
 }
 
 TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
@@ -192,33 +104,6 @@ TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
   Urng4bits urng4;
   Urng32bits urng32;
 
   // 8-bit types
-  {
-    absl::random_internal::FastUniformBits<uint8_t, 1> fast1;
-    EXPECT_EQ(0x1, fast1(urng4));
-    EXPECT_EQ(0x1, fast1(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint8_t, 2> fast2;
-    EXPECT_EQ(0x1, fast2(urng4));
-    EXPECT_EQ(0x1, fast2(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint8_t, 4> fast4;
-    EXPECT_EQ(0x1, fast4(urng4));
-    EXPECT_EQ(0x1, fast4(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint8_t, 6> fast6;
-    EXPECT_EQ(0x9, fast6(urng4));  // b001001 (2x3)
-    EXPECT_EQ(0x1, fast6(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint8_t, 6> fast7;
-    EXPECT_EQ(0x9, fast7(urng4));  // b00001001 (1x4 + 1x3)
-    EXPECT_EQ(0x1, fast7(urng32));
-  }
   {
     absl::random_internal::FastUniformBits<uint8_t> fast8;
     EXPECT_EQ(0x11, fast8(urng4));
@@ -226,22 +111,6 @@ TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
   }
 
   // 16-bit types
-  {
-    absl::random_internal::FastUniformBits<uint16_t, 10> fast10;
-    EXPECT_EQ(0x91, fast10(urng4));  // b 0010010001 (2x3 + 1x4)
-    EXPECT_EQ(0x1, fast10(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint16_t, 11> fast11;
-    EXPECT_EQ(0x111, fast11(urng4));
-    EXPECT_EQ(0x1, fast11(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint16_t, 12> fast12;
-    EXPECT_EQ(0x111, fast12(urng4));
-    EXPECT_EQ(0x1, fast12(urng32));
-  }
   {
     absl::random_internal::FastUniformBits<uint16_t> fast16;
     EXPECT_EQ(0x1111, fast16(urng4));
@@ -249,17 +118,6 @@ TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
   }
 
   // 32-bit types
-  {
-    absl::random_internal::FastUniformBits<uint32_t, 21> fast21;
-    EXPECT_EQ(0x49111, fast21(urng4));  // b 001001001 000100010001 (3x3 + 3x4)
-    EXPECT_EQ(0x1, fast21(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint32_t, 24> fast24;
-    EXPECT_EQ(0x111111, fast24(urng4));
-    EXPECT_EQ(0x1, fast24(urng32));
-  }
   {
     absl::random_internal::FastUniformBits<uint32_t> fast32;
     EXPECT_EQ(0x11111111, fast32(urng4));
@@ -267,19 +125,6 @@ TEST(FastUniformBitsTest, Urng4_VariousOutputs) {
   }
 
   // 64-bit types
-  {
-    absl::random_internal::FastUniformBits<uint64_t, 5> fast5;
-    EXPECT_EQ(0x9, fast5(urng4));
-    EXPECT_EQ(0x1, fast5(urng32));
-  }
-  {
-    absl::random_internal::FastUniformBits<uint64_t, 48> fast48;
-    EXPECT_EQ(0x111111111111, fast48(urng4));
-    // computes in 2 steps, should be 24 << 24
-    EXPECT_EQ(0x000001000001, fast48(urng32));
-  }
   {
     absl::random_internal::FastUniformBits<uint64_t> fast64;
     EXPECT_EQ(0x1111111111111111, fast64(urng4));


@@ -449,7 +449,7 @@ class FormatRawSink {
 // additional arguments.
 //
 // By default, `std::string` and `std::ostream` are supported as destination
-// objects.
+// objects. If a `std::string` is used the formatted string is appended to it.
 //
 // `absl::Format()` is a generic version of `absl::StrFormat()`, for custom
 // sinks. The format string, like format strings for `StrFormat()`, is checked


@@ -204,7 +204,7 @@ template <class Op, class... Vs>
 using VisitIndicesResultT = typename VisitIndicesResultImpl<Op, Vs...>::type;
 
 template <class ReturnType, class FunctionObject, class EndIndices,
-          std::size_t... BoundIndices>
+          class BoundIndices>
 struct MakeVisitationMatrix;
 
 template <class ReturnType, class FunctionObject, std::size_t... Indices>
@@ -218,7 +218,7 @@ constexpr ReturnType call_with_indices(FunctionObject&& function) {
 template <class ReturnType, class FunctionObject, std::size_t... BoundIndices>
 struct MakeVisitationMatrix<ReturnType, FunctionObject, index_sequence<>,
-                            BoundIndices...> {
+                            index_sequence<BoundIndices...>> {
   using ResultType = ReturnType (*)(FunctionObject&&);
 
   static constexpr ResultType Run() {
     return &call_with_indices<ReturnType, FunctionObject,
@@ -226,24 +226,34 @@ struct MakeVisitationMatrix<ReturnType, FunctionObject, index_sequence<>,
   }
 };
 
+template <typename Is, std::size_t J>
+struct AppendToIndexSequence;
+
+template <typename Is, std::size_t J>
+using AppendToIndexSequenceT = typename AppendToIndexSequence<Is, J>::type;
+
+template <std::size_t... Is, std::size_t J>
+struct AppendToIndexSequence<index_sequence<Is...>, J> {
+  using type = index_sequence<Is..., J>;
+};
+
 template <class ReturnType, class FunctionObject, class EndIndices,
-          class CurrIndices, std::size_t... BoundIndices>
+          class CurrIndices, class BoundIndices>
 struct MakeVisitationMatrixImpl;
 
-template <class ReturnType, class FunctionObject, std::size_t... EndIndices,
-          std::size_t... CurrIndices, std::size_t... BoundIndices>
-struct MakeVisitationMatrixImpl<
-    ReturnType, FunctionObject, index_sequence<EndIndices...>,
-    index_sequence<CurrIndices...>, BoundIndices...> {
+template <class ReturnType, class FunctionObject, class EndIndices,
+          std::size_t... CurrIndices, class BoundIndices>
+struct MakeVisitationMatrixImpl<ReturnType, FunctionObject, EndIndices,
+                                index_sequence<CurrIndices...>, BoundIndices> {
   using ResultType = SimpleArray<
-      typename MakeVisitationMatrix<ReturnType, FunctionObject,
-                                    index_sequence<EndIndices...>>::ResultType,
+      typename MakeVisitationMatrix<ReturnType, FunctionObject, EndIndices,
+                                    index_sequence<>>::ResultType,
       sizeof...(CurrIndices)>;
 
   static constexpr ResultType Run() {
-    return {{MakeVisitationMatrix<ReturnType, FunctionObject,
-             index_sequence<EndIndices...>,
-             BoundIndices..., CurrIndices>::Run()...}};
+    return {{MakeVisitationMatrix<
+        ReturnType, FunctionObject, EndIndices,
+        AppendToIndexSequenceT<BoundIndices, CurrIndices>>::Run()...}};
   }
 };
@@ -251,10 +261,11 @@ template <class ReturnType, class FunctionObject, std::size_t HeadEndIndex,
           std::size_t... TailEndIndices, std::size_t... BoundIndices>
 struct MakeVisitationMatrix<ReturnType, FunctionObject,
                             index_sequence<HeadEndIndex, TailEndIndices...>,
-                            BoundIndices...>
-    : MakeVisitationMatrixImpl<
-          ReturnType, FunctionObject, index_sequence<TailEndIndices...>,
-          absl::make_index_sequence<HeadEndIndex>, BoundIndices...> {};
+                            index_sequence<BoundIndices...>>
+    : MakeVisitationMatrixImpl<ReturnType, FunctionObject,
+                               index_sequence<TailEndIndices...>,
+                               absl::make_index_sequence<HeadEndIndex>,
+                               index_sequence<BoundIndices...>> {};
 
 struct UnreachableSwitchCase {
   template <class Op>
@@ -423,7 +434,8 @@ struct VisitIndicesFallback {
   static VisitIndicesResultT<Op, SizeT...> Run(Op&& op, SizeT... indices) {
     return AccessSimpleArray(
         MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
-                             index_sequence<(EndIndices + 1)...>>::Run(),
+                             index_sequence<(EndIndices + 1)...>,
+                             index_sequence<>>::Run(),
         (indices + 1)...)(absl::forward<Op>(op));
   }
 };