- b9a479321581cd0293f124041bf5c06f456afec1 Adds exception safety tests for absl::make_unique<T>(...) by Abseil Team <absl-team@google.com>
- 78c61364007f6ab66155c151d0061bbec89c3dbd Update variadic visitation to use a switch statement when... by Matt Calabrese <calabrese@google.com>
- b62eb9546087e0001307a741fcdf023b2d156966 Merge GitHub PR #130 - Add MIPS support to GetProgramCoun... by Derek Mauro <dmauro@google.com>
- 09ab5739de33c8f1bebab2bb70bf7d4331348f05 Update ABSL_ASSERT to silence clang-tidy warnings about c... by Matt Calabrese <calabrese@google.com>
- e73ee389ce8fe1a90738973c219ebbb19bb389f3 Update unary visitation to use a switch statement when th... by Matt Calabrese <calabrese@google.com>
- c8734ccf475b856c95220f21a5ec4f44302cb5ce Work around a MSVC bug for absl::variant, by making `Acce... by Xiaoyi Zhang <zhangxy@google.com>

GitOrigin-RevId: b9a479321581cd0293f124041bf5c06f456afec1
Change-Id: Idb6fc906087c0a4e6fc5c75a391c7f73101c613e
Commit: eb686c069f
Parent: e5be80532b

8 changed files with 610 additions and 40 deletions
absl/base/macros.h

@@ -194,8 +194,9 @@ enum LinkerInitialized {
 #if defined(NDEBUG)
 #define ABSL_ASSERT(expr) (false ? (void)(expr) : (void)0)
 #else
-#define ABSL_ASSERT(expr) \
-  (ABSL_PREDICT_TRUE((expr)) ? (void)0 : [] { assert(false && #expr); }())
+#define ABSL_ASSERT(expr) \
+  (ABSL_PREDICT_TRUE((expr)) ? (void)0 \
+                             : [] { assert(false && #expr); }())  // NOLINT
 #endif

 #endif  // ABSL_BASE_MACROS_H_
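An illustrative usage sketch (not part of this commit): ABSL_ASSERT is written so that it can appear inside constexpr functions, where a plain assert() is not portable in C++11, and the added NOLINT comment keeps clang-tidy from flagging the assert-inside-lambda idiom. A minimal example of the intended pattern, assuming a translation unit that includes the Abseil headers:

    #include "absl/base/macros.h"

    // The comma expression keeps the function a single return statement, so it
    // remains a valid C++11 constexpr function; the assertion fires only in
    // debug builds and only at runtime.
    constexpr int Clamp(int v, int lo, int hi) {
      return ABSL_ASSERT(lo <= hi), v < lo ? lo : (v > hi ? hi : v);
    }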
absl/memory/BUILD.bazel

@@ -18,6 +18,7 @@ load(
     "//absl:copts.bzl",
     "ABSL_DEFAULT_COPTS",
     "ABSL_TEST_COPTS",
+    "ABSL_EXCEPTIONS_FLAG",
 )

 package(default_visibility = ["//visibility:public"])

@@ -45,3 +46,16 @@ cc_test(
         "@com_google_googletest//:gtest_main",
     ],
 )
+
+cc_test(
+    name = "memory_exception_safety_test",
+    srcs = [
+        "memory_exception_safety_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+    deps = [
+        ":memory",
+        "//absl/base:exception_safety_testing",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
absl/memory/CMakeLists.txt

@@ -49,4 +49,23 @@ absl_test(
 )

+
+# test memory_exception_safety_test
+set(MEMORY_EXCEPTION_SAFETY_TEST_SRC "memory_exception_safety_test.cc")
+set(MEMORY_EXCEPTION_SAFETY_TEST_PUBLIC_LIBRARIES
+  absl::memory
+  absl_base_internal_exception_safety_testing
+)
+
+absl_test(
+  TARGET
+    memory_exception_safety_test
+  SOURCES
+    ${MEMORY_EXCEPTION_SAFETY_TEST_SRC}
+  PUBLIC_LIBRARIES
+    ${MEMORY_EXCEPTION_SAFETY_TEST_PUBLIC_LIBRARIES}
+  PRIVATE_COMPILE_FLAGS
+    ${ABSL_EXCEPTIONS_FLAG}
+)
+

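A usage note (illustrative, not part of the commit): with the Bazel target above in place, the new test can be run with `bazel test //absl/memory:memory_exception_safety_test`; the CMake target is likewise run through ctest after a build configured with exceptions enabled. Both build files pass ABSL_EXCEPTIONS_FLAG because the test deliberately throws from constructors.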
absl/memory/memory_exception_safety_test.cc (new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/memory/memory.h"

#include "gtest/gtest.h"
#include "absl/base/internal/exception_safety_testing.h"

namespace absl {
namespace {

using Thrower = ::testing::ThrowingValue<>;

TEST(MakeUnique, CheckForLeaks) {
  constexpr int kValue = 321;
  constexpr size_t kLength = 10;
  auto tester = testing::MakeExceptionSafetyTester()
                    .WithInitialValue(Thrower(kValue))
                    // Ensures make_unique does not modify the input. The real
                    // test, though, is ConstructorTracker checking for leaks.
                    .WithInvariants(testing::strong_guarantee);

  EXPECT_TRUE(tester.Test([](Thrower* thrower) {
    static_cast<void>(absl::make_unique<Thrower>(*thrower));
  }));

  EXPECT_TRUE(tester.Test([](Thrower* thrower) {
    static_cast<void>(absl::make_unique<Thrower>(std::move(*thrower)));
  }));

  // Test T[n] overload
  EXPECT_TRUE(tester.Test([&](Thrower*) {
    static_cast<void>(absl::make_unique<Thrower[]>(kLength));
  }));
}

}  // namespace
}  // namespace absl
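A conceptual sketch of the property the test above is guarding (illustrative only, not part of the commit): if the element constructor throws inside make_unique, the storage obtained by the enclosed new-expression must be released before the exception escapes, so nothing is leaked. MayThrow and Demo below are hypothetical names used only for this sketch:

    #include <stdexcept>
    #include "absl/memory/memory.h"

    struct MayThrow {
      MayThrow() = default;
      MayThrow(const MayThrow&) { throw std::runtime_error("copy failed"); }
    };

    void Demo() {
      MayThrow original;
      try {
        // The copy constructor throws; the allocation made on behalf of
        // make_unique is freed automatically before the handler runs.
        auto p = absl::make_unique<MayThrow>(original);
      } catch (const std::runtime_error&) {
        // No leak here; the test's exception-safety harness checks the
        // analogous property for ThrowingValue.
      }
    }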
absl/types/BUILD.bazel

@@ -247,6 +247,19 @@ cc_test(
     ],
 )

+cc_test(
+    name = "variant_benchmark",
+    srcs = [
+        "variant_benchmark.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":variant",
+        "//absl/utility",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
 cc_test(
     name = "variant_exception_safety_test",
     size = "small",
absl/types/variant_internal.h

@@ -19,15 +19,19 @@
 #ifndef ABSL_TYPES_variant_internal_H_
 #define ABSL_TYPES_variant_internal_H_

+#include <cassert>
 #include <cstddef>
 #include <cstdlib>
 #include <memory>
 #include <stdexcept>
 #include <tuple>
 #include <type_traits>

+#include "absl/base/config.h"
 #include "absl/base/internal/identity.h"
+#include "absl/base/internal/inline_variable.h"
 #include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
 #include "absl/base/optimization.h"
 #include "absl/meta/type_traits.h"
 #include "absl/types/bad_variant_access.h"

@@ -119,6 +123,8 @@ using GiveQualsToT = typename GiveQualsTo<T, U>::type;
 template <std::size_t I>
 using SizeT = std::integral_constant<std::size_t, I>;

+using NPos = SizeT<variant_npos>;
+
 template <class Variant, class T, class = void>
 struct IndexOfConstructedType {};
@@ -248,19 +254,270 @@ struct MakeVisitationMatrix<ReturnType, FunctionObject,
           ReturnType, FunctionObject, index_sequence<TailEndIndices...>,
           absl::make_index_sequence<HeadEndIndex>, BoundIndices...> {};

-template <std::size_t... EndIndices, class Op, class... SizeT>
-VisitIndicesResultT<Op, SizeT...> visit_indices(Op&& op, SizeT... indices) {
-  return AccessSimpleArray(
-      MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
-                           index_sequence<(EndIndices + 1)...>>::Run(),
-      (indices + 1)...)(absl::forward<Op>(op));
-}
+struct UnreachableSwitchCase {
+  template <class Op>
+  [[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
+      Op&& /*ignored*/) {
+#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \
+    (defined(__GNUC__) && !defined(__clang__))
+    __builtin_unreachable();
+#elif defined(_MSC_VER)
+    __assume(false);
+#else
+    // Try to use assert of false being identified as an unreachable intrinsic.
+    // NOTE: We use assert directly to increase chances of exploiting an assume
+    // intrinsic.
+    assert(false);  // NOLINT
+
+    // Hack to silence potential no return warning -- cause an infinite loop.
+    return Run(absl::forward<Op>(op));
+#endif  // Checks for __builtin_unreachable
+  }
+};
+
+template <class Op, std::size_t I>
+struct ReachableSwitchCase {
+  static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
+    return absl::base_internal::Invoke(absl::forward<Op>(op), SizeT<I>());
+  }
+};
+
+// The number 33 is just a guess at a reasonable maximum to our switch. It is
+// not based on any analysis. The reason it is a power of 2 plus 1 instead of a
+// power of 2 is because the number was picked to correspond to a power of 2
+// amount of "normal" alternatives, plus one for the possibility of the user
+// providing "monostate" in addition to the more natural alternatives.
+ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33);
+
+// Note: The default-definition is for unreachable cases.
+template <bool IsReachable>
+struct PickCaseImpl {
+  template <class Op, std::size_t I>
+  using Apply = UnreachableSwitchCase;
+};
+
+template <>
+struct PickCaseImpl</*IsReachable =*/true> {
+  template <class Op, std::size_t I>
+  using Apply = ReachableSwitchCase<Op, I>;
+};
+
+// Note: This form of dance with template aliases is to make sure that we
+// instantiate a number of templates proportional to the number of variant
+// alternatives rather than a number of templates proportional to our
+// maximum unrolled amount of visitation cases (aliases are effectively
+// "free" whereas other template instantiations are costly).
+template <class Op, std::size_t I, std::size_t EndIndex>
+using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply<Op, I>;
+
+template <class ReturnType>
+[[noreturn]] ReturnType TypedThrowBadVariantAccess() {
+  absl::variant_internal::ThrowBadVariantAccess();
+}
+
+// Given N variant sizes, determine the number of cases there would need to be
+// in a single switch-statement that would cover every possibility in the
+// corresponding N-ary visit operation.
+template <std::size_t... NumAlternatives>
+struct NumCasesOfSwitch;
+
+template <std::size_t HeadNumAlternatives, std::size_t... TailNumAlternatives>
+struct NumCasesOfSwitch<HeadNumAlternatives, TailNumAlternatives...> {
+  static constexpr std::size_t value =
+      (HeadNumAlternatives + 1) *
+      NumCasesOfSwitch<TailNumAlternatives...>::value;
+};
+
+template <>
+struct NumCasesOfSwitch<> {
+  static constexpr std::size_t value = 1;
+};
+
+// A switch statement optimizes better than the table of function pointers.
+template <std::size_t EndIndex>
+struct VisitIndicesSwitch {
+  static_assert(EndIndex <= MaxUnrolledVisitCases,
+                "Maximum unrolled switch size exceeded.");
+
+  template <class Op>
+  static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
+    switch (i) {
+      case 0:
+        return PickCase<Op, 0, EndIndex>::Run(absl::forward<Op>(op));
+      case 1:
+        return PickCase<Op, 1, EndIndex>::Run(absl::forward<Op>(op));
+      case 2:
+        return PickCase<Op, 2, EndIndex>::Run(absl::forward<Op>(op));
+      case 3:
+        return PickCase<Op, 3, EndIndex>::Run(absl::forward<Op>(op));
+      case 4:
+        return PickCase<Op, 4, EndIndex>::Run(absl::forward<Op>(op));
+      case 5:
+        return PickCase<Op, 5, EndIndex>::Run(absl::forward<Op>(op));
+      case 6:
+        return PickCase<Op, 6, EndIndex>::Run(absl::forward<Op>(op));
+      case 7:
+        return PickCase<Op, 7, EndIndex>::Run(absl::forward<Op>(op));
+      case 8:
+        return PickCase<Op, 8, EndIndex>::Run(absl::forward<Op>(op));
+      case 9:
+        return PickCase<Op, 9, EndIndex>::Run(absl::forward<Op>(op));
+      case 10:
+        return PickCase<Op, 10, EndIndex>::Run(absl::forward<Op>(op));
+      case 11:
+        return PickCase<Op, 11, EndIndex>::Run(absl::forward<Op>(op));
+      case 12:
+        return PickCase<Op, 12, EndIndex>::Run(absl::forward<Op>(op));
+      case 13:
+        return PickCase<Op, 13, EndIndex>::Run(absl::forward<Op>(op));
+      case 14:
+        return PickCase<Op, 14, EndIndex>::Run(absl::forward<Op>(op));
+      case 15:
+        return PickCase<Op, 15, EndIndex>::Run(absl::forward<Op>(op));
+      case 16:
+        return PickCase<Op, 16, EndIndex>::Run(absl::forward<Op>(op));
+      case 17:
+        return PickCase<Op, 17, EndIndex>::Run(absl::forward<Op>(op));
+      case 18:
+        return PickCase<Op, 18, EndIndex>::Run(absl::forward<Op>(op));
+      case 19:
+        return PickCase<Op, 19, EndIndex>::Run(absl::forward<Op>(op));
+      case 20:
+        return PickCase<Op, 20, EndIndex>::Run(absl::forward<Op>(op));
+      case 21:
+        return PickCase<Op, 21, EndIndex>::Run(absl::forward<Op>(op));
+      case 22:
+        return PickCase<Op, 22, EndIndex>::Run(absl::forward<Op>(op));
+      case 23:
+        return PickCase<Op, 23, EndIndex>::Run(absl::forward<Op>(op));
+      case 24:
+        return PickCase<Op, 24, EndIndex>::Run(absl::forward<Op>(op));
+      case 25:
+        return PickCase<Op, 25, EndIndex>::Run(absl::forward<Op>(op));
+      case 26:
+        return PickCase<Op, 26, EndIndex>::Run(absl::forward<Op>(op));
+      case 27:
+        return PickCase<Op, 27, EndIndex>::Run(absl::forward<Op>(op));
+      case 28:
+        return PickCase<Op, 28, EndIndex>::Run(absl::forward<Op>(op));
+      case 29:
+        return PickCase<Op, 29, EndIndex>::Run(absl::forward<Op>(op));
+      case 30:
+        return PickCase<Op, 30, EndIndex>::Run(absl::forward<Op>(op));
+      case 31:
+        return PickCase<Op, 31, EndIndex>::Run(absl::forward<Op>(op));
+      case 32:
+        return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
+      default:
+        ABSL_ASSERT(i == variant_npos);
+        return absl::base_internal::Invoke(absl::forward<Op>(op), NPos());
+    }
+  }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesFallback {
+  template <class Op, class... SizeT>
+  static VisitIndicesResultT<Op, SizeT...> Run(Op&& op, SizeT... indices) {
+    return AccessSimpleArray(
+        MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
+                             index_sequence<(EndIndices + 1)...>>::Run(),
+        (indices + 1)...)(absl::forward<Op>(op));
+  }
+};
+
+// Take an N-dimensional series of indices and convert them into a single index
+// without loss of information. The purpose of this is to be able to convert an
+// N-ary visit operation into a single switch statement.
+template <std::size_t...>
+struct FlattenIndices;
+
+template <std::size_t HeadSize, std::size_t... TailSize>
+struct FlattenIndices<HeadSize, TailSize...> {
+  template <class... SizeType>
+  static constexpr std::size_t Run(std::size_t head, SizeType... tail) {
+    return head + HeadSize * FlattenIndices<TailSize...>::Run(tail...);
+  }
+};
+
+template <>
+struct FlattenIndices<> {
+  static constexpr std::size_t Run() { return 0; }
+};
+
+// Take a single "flattened" index (flattened by FlattenIndices) and determine
+// the value of the index of one of the logically represented dimensions.
+template <std::size_t I, std::size_t IndexToGet, std::size_t HeadSize,
+          std::size_t... TailSize>
+struct UnflattenIndex {
+  static constexpr std::size_t value =
+      UnflattenIndex<I / HeadSize, IndexToGet - 1, TailSize...>::value;
+};
+
+template <std::size_t I, std::size_t HeadSize, std::size_t... TailSize>
+struct UnflattenIndex<I, 0, HeadSize, TailSize...> {
+  static constexpr std::size_t value = (I % HeadSize);
+};
+
+// The backend for converting an N-ary visit operation into a unary visit.
+template <class IndexSequence, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl;
+
+template <std::size_t... N, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
+  // A type that can take an N-ary function object and converts it to a unary
+  // function object that takes a single, flattened index, and "unflattens" it
+  // into its individual dimensions when forwarding to the wrapped object.
+  template <class Op>
+  struct FlattenedOp {
+    template <std::size_t I>
+    VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
+        SizeT<I> /*index*/) && {
+      return base_internal::Invoke(
+          absl::forward<Op>(op),
+          SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
+                std::size_t{1}>()...);
+    }
+
+    Op&& op;
+  };
+
+  template <class Op, class... SizeType>
+  static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(
+      Op&& op, SizeType... i) {
+    return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
+        FlattenedOp<Op>{absl::forward<Op>(op)},
+        FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
+            (i + std::size_t{1})...));
+  }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesVariadic
+    : VisitIndicesVariadicImpl<absl::make_index_sequence<sizeof...(EndIndices)>,
+                               EndIndices...> {};
+
+// This implementation will flatten N-ary visit operations into a single switch
+// statement when the number of cases would be less than our maximum specified
+// switch-statement size.
+// TODO(calabrese)
+// Based on benchmarks, determine whether the function table approach actually
+// does optimize better than a chain of switch statements and possibly update
+// the implementation accordingly. Also consider increasing the maximum switch
+// size.
+template <std::size_t... EndIndices>
+struct VisitIndices
+    : absl::conditional_t<(NumCasesOfSwitch<EndIndices...>::value <=
+                           MaxUnrolledVisitCases),
+                          VisitIndicesVariadic<EndIndices...>,
+                          VisitIndicesFallback<EndIndices...>> {};
+
+template <std::size_t EndIndex>
+struct VisitIndices<EndIndex>
+    : absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases),
+                          VisitIndicesSwitch<EndIndex>,
+                          VisitIndicesFallback<EndIndex>> {};

 // Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast`
 // below is returning the address of a temporary or local object.
 #ifdef _MSC_VER
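A worked example (illustrative, not part of the commit) of how the pieces above combine for a binary visit. Visiting a variant with 2 alternatives together with one that has 3, each dimension gains one extra slot for the valueless-by-exception state, so the flattened switch needs (2 + 1) * (3 + 1) = 12 cases, which is what NumCasesOfSwitch<2, 3>::value computes. FlattenIndices maps the shifted per-variant indices onto a single case number and UnflattenIndex inverts that mapping. The checks below use the internal names defined above purely to illustrate the index arithmetic:

    // Sketch only; assumes "absl/types/variant.h" has been included so the
    // absl::variant_internal helpers are visible.
    static_assert(
        absl::variant_internal::NumCasesOfSwitch<2, 3>::value == 12, "");
    // Indices are passed shifted by one so that 0 can stand for "valueless".
    // Flattening (1, 2) over dimension sizes (3, 4): 1 + 3 * 2 == 7.
    static_assert(
        absl::variant_internal::FlattenIndices<3, 4>::Run(1, 2) == 7, "");
    // Unflattening recovers each coordinate: 7 % 3 == 1 and (7 / 3) % 4 == 2.
    static_assert(
        absl::variant_internal::UnflattenIndex<7, 0, 3, 4>::value == 1, "");
    static_assert(
        absl::variant_internal::UnflattenIndex<7, 1, 3, 4>::value == 2, "");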
@@ -270,8 +527,10 @@ template <class ReturnType>

 // TODO(calabrese) std::launder
 // TODO(calabrese) constexpr
+// NOTE: DO NOT REMOVE the `inline` keyword as it is necessary to work around a
+// MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details.
 template <class Self, std::size_t I>
-VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
+inline VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
   return reinterpret_cast<VariantAccessResult<I, Self>>(self);
 }

@@ -313,7 +572,7 @@ struct VariantCoreAccess {

   template <class Variant>
   static void InitFrom(Variant& self, Variant&& other) {  // NOLINT
-    variant_internal::visit_indices<absl::variant_size<Variant>::value>(
+    VisitIndices<absl::variant_size<Variant>::value>::Run(
         InitFromVisitor<Variant, Variant&&>{&self,
                                             std::forward<Variant>(other)},
         other.index());

@@ -1049,9 +1308,7 @@ class VariantStateBaseDestructorNontrivial : protected VariantStateBase<T...> {
     VariantStateBaseDestructorNontrivial* self;
   };

-  void destroy() {
-    variant_internal::visit_indices<sizeof...(T)>(Destroyer{this}, index_);
-  }
+  void destroy() { VisitIndices<sizeof...(T)>::Run(Destroyer{this}, index_); }

   ~VariantStateBaseDestructorNontrivial() { destroy(); }

@@ -1087,8 +1344,7 @@ class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor<T...> {
   VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept(
       absl::conjunction<std::is_nothrow_move_constructible<T>...>::value)
       : Base(NoopConstructorTag()) {
-    variant_internal::visit_indices<sizeof...(T)>(Construct{this, &other},
-                                                  other.index_);
+    VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
     index_ = other.index_;
   }

@@ -1131,8 +1387,7 @@ class VariantCopyBaseNontrivial : protected VariantMoveBase<T...> {

   VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other)
       : Base(NoopConstructorTag()) {
-    variant_internal::visit_indices<sizeof...(T)>(Construct{this, &other},
-                                                  other.index_);
+    VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
     index_ = other.index_;
   }

@@ -1166,7 +1421,7 @@ class VariantMoveAssignBaseNontrivial : protected VariantCopyBase<T...> {
       operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
           absl::conjunction<std::is_nothrow_move_constructible<T>...,
                             std::is_nothrow_move_assignable<T>...>::value) {
-    variant_internal::visit_indices<sizeof...(T)>(
+    VisitIndices<sizeof...(T)>::Run(
         VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
     return *this;
   }

@@ -1195,7 +1450,7 @@ class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase<T...> {

   VariantCopyAssignBaseNontrivial& operator=(
       const VariantCopyAssignBaseNontrivial& other) {
-    variant_internal::visit_indices<sizeof...(T)>(
+    VisitIndices<sizeof...(T)>::Run(
         VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
     return *this;
   }

@@ -1336,7 +1591,7 @@ struct Swap {
   template <std::size_t Wi>
   void operator()(SizeT<Wi> /*w_i*/) {
     if (v->index() == Wi) {
-      visit_indices<sizeof...(Types)>(SwapSameIndex<Types...>{v, w}, Wi);
+      VisitIndices<sizeof...(Types)>::Run(SwapSameIndex<Types...>{v, w}, Wi);
     } else {
       generic_swap();
     }

@@ -1370,11 +1625,10 @@ struct VariantHashBase<Variant,
     if (var.valueless_by_exception()) {
       return 239799884;
     }
-    size_t result =
-        variant_internal::visit_indices<variant_size<Variant>::value>(
-            PerformVisitation<VariantHashVisitor, const Variant&>{
-                std::forward_as_tuple(var), VariantHashVisitor{}},
-            var.index());
+    size_t result = VisitIndices<variant_size<Variant>::value>::Run(
+        PerformVisitation<VariantHashVisitor, const Variant&>{
+            std::forward_as_tuple(var), VariantHashVisitor{}},
+        var.index());
     // Combine the index and the hash result in order to distinguish
     // std::variant<int, int> holding the same value as different alternative.
     return result ^ var.index();
absl/types/variant.h

@@ -416,12 +416,12 @@ constexpr absl::add_pointer_t<const T> get_if(
 template <typename Visitor, typename... Variants>
 variant_internal::VisitResult<Visitor, Variants...> visit(Visitor&& vis,
                                                           Variants&&... vars) {
-  return variant_internal::visit_indices<
-      variant_size<absl::decay_t<Variants>>::value...>(
-      variant_internal::PerformVisitation<Visitor, Variants...>{
-          std::forward_as_tuple(absl::forward<Variants>(vars)...),
-          absl::forward<Visitor>(vis)},
-      vars.index()...);
+  return variant_internal::
+      VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
+          variant_internal::PerformVisitation<Visitor, Variants...>{
+              std::forward_as_tuple(absl::forward<Variants>(vars)...),
+              absl::forward<Visitor>(vis)},
+          vars.index()...);
 }

 // monostate

@@ -573,7 +573,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
   variant& operator=(T&& t) noexcept(
       std::is_nothrow_assignable<Tj&, T>::value&&
           std::is_nothrow_constructible<Tj, T>::value) {
-    variant_internal::visit_indices<sizeof...(Tn) + 1>(
+    variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
         variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
             this, absl::forward<T>(t)),
         index());

@@ -682,7 +682,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
   // true and `is_nothrow_swappable()` is same as `std::is_trivial()`.
   void swap(variant& rhs) noexcept(
       absl::conjunction<std::is_trivial<T0>, std::is_trivial<Tn>...>::value) {
-    return variant_internal::visit_indices<sizeof...(Tn) + 1>(
+    return variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
         variant_internal::Swap<T0, Tn...>{this, &rhs}, rhs.index());
   }
 };

@@ -722,7 +722,7 @@ template <typename... Types>
 constexpr variant_internal::RequireAllHaveEqualT<Types...> operator==(
     const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() == b.index()) &&
-         variant_internal::visit_indices<sizeof...(Types)>(
+         variant_internal::VisitIndices<sizeof...(Types)>::Run(
              variant_internal::EqualsOp<Types...>{&a, &b}, a.index());
 }

@@ -731,7 +731,7 @@ template <typename... Types>
 constexpr variant_internal::RequireAllHaveNotEqualT<Types...> operator!=(
     const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() != b.index()) ||
-         variant_internal::visit_indices<sizeof...(Types)>(
+         variant_internal::VisitIndices<sizeof...(Types)>::Run(
              variant_internal::NotEqualsOp<Types...>{&a, &b}, a.index());
 }

@@ -741,7 +741,7 @@ constexpr variant_internal::RequireAllHaveLessThanT<Types...> operator<(
     const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() != b.index())
              ? (a.index() + 1) < (b.index() + 1)
-             : variant_internal::visit_indices<sizeof...(Types)>(
+             : variant_internal::VisitIndices<sizeof...(Types)>::Run(
                    variant_internal::LessThanOp<Types...>{&a, &b}, a.index());
 }

@@ -751,7 +751,7 @@ constexpr variant_internal::RequireAllHaveGreaterThanT<Types...> operator>(
     const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() != b.index())
              ? (a.index() + 1) > (b.index() + 1)
-             : variant_internal::visit_indices<sizeof...(Types)>(
+             : variant_internal::VisitIndices<sizeof...(Types)>::Run(
                    variant_internal::GreaterThanOp<Types...>{&a, &b},
                    a.index());
 }

@@ -762,7 +762,7 @@ constexpr variant_internal::RequireAllHaveLessThanOrEqualT<Types...> operator<=(
     const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() != b.index())
              ? (a.index() + 1) < (b.index() + 1)
-             : variant_internal::visit_indices<sizeof...(Types)>(
+             : variant_internal::VisitIndices<sizeof...(Types)>::Run(
                    variant_internal::LessThanOrEqualsOp<Types...>{&a, &b},
                    a.index());
 }

@@ -773,7 +773,7 @@ constexpr variant_internal::RequireAllHaveGreaterThanOrEqualT<Types...>
 operator>=(const variant<Types...>& a, const variant<Types...>& b) {
   return (a.index() != b.index())
              ? (a.index() + 1) > (b.index() + 1)
-             : variant_internal::visit_indices<sizeof...(Types)>(
+             : variant_internal::VisitIndices<sizeof...(Types)>::Run(
                    variant_internal::GreaterThanOrEqualsOp<Types...>{&a, &b},
                    a.index());
 }
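A small end-to-end sketch (illustrative, not part of the commit) of the public entry point whose implementation changed above. Call sites do not change; absl::visit simply dispatches through VisitIndices, which now unrolls into a single switch whenever the flattened number of cases is at most MaxUnrolledVisitCases (33). AltSize and SizeVisitor below are hypothetical names for the sketch:

    #include <cstddef>
    #include <string>
    #include "absl/types/variant.h"

    struct SizeVisitor {
      template <typename T>
      std::size_t operator()(const T&) const { return sizeof(T); }
    };

    std::size_t AltSize(const absl::variant<int, std::string>& v) {
      // Unary visit over two alternatives: with the change above this resolves
      // to the switch-based VisitIndicesSwitch path rather than the
      // function-pointer table used by the fallback.
      return absl::visit(SizeVisitor(), v);
    }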
absl/types/variant_benchmark.cc (new file, 220 lines)
@@ -0,0 +1,220 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Unit tests for the variant template. The 'is' and 'IsEmpty' methods
// of variant are not explicitly tested because they are used repeatedly
// in building other tests. All other public variant methods should have
// explicit tests.

#include "absl/types/variant.h"

#include <cstddef>
#include <cstdlib>
#include <string>
#include <tuple>

#include "benchmark/benchmark.h"
#include "absl/utility/utility.h"

namespace absl {
namespace {

template <std::size_t I>
struct VariantAlternative {
  char member;
};

template <class Indices>
struct VariantOfAlternativesImpl;

template <std::size_t... Indices>
struct VariantOfAlternativesImpl<absl::index_sequence<Indices...>> {
  using type = absl::variant<VariantAlternative<Indices>...>;
};

template <std::size_t NumAlternatives>
using VariantOfAlternatives = typename VariantOfAlternativesImpl<
    absl::make_index_sequence<NumAlternatives>>::type;

struct Empty {};

template <class... T>
void Ignore(T...) noexcept {}

template <class T>
Empty DoNotOptimizeAndReturnEmpty(T&& arg) noexcept {
  benchmark::DoNotOptimize(arg);
  return {};
}

struct VisitorApplier {
  struct Visitor {
    template <class... T>
    void operator()(T&&... args) const noexcept {
      Ignore(DoNotOptimizeAndReturnEmpty(args)...);
    }
  };

  template <class... Vars>
  void operator()(const Vars&... vars) const noexcept {
    absl::visit(Visitor(), vars...);
  }
};

template <std::size_t NumIndices, std::size_t CurrIndex = NumIndices - 1>
struct MakeWithIndex {
  using Variant = VariantOfAlternatives<NumIndices>;

  static Variant Run(std::size_t index) {
    return index == CurrIndex
               ? Variant(absl::in_place_index_t<CurrIndex>())
               : MakeWithIndex<NumIndices, CurrIndex - 1>::Run(index);
  }
};

template <std::size_t NumIndices>
struct MakeWithIndex<NumIndices, 0> {
  using Variant = VariantOfAlternatives<NumIndices>;

  static Variant Run(std::size_t /*index*/) { return Variant(); }
};

template <std::size_t NumIndices, class Dimensions>
struct MakeVariantTuple;

template <class T, std::size_t /*I*/>
using always_t = T;

template <std::size_t NumIndices>
VariantOfAlternatives<NumIndices> MakeVariant(std::size_t dimension,
                                              std::size_t index) {
  return dimension == 0
             ? MakeWithIndex<NumIndices>::Run(index % NumIndices)
             : MakeVariant<NumIndices>(dimension - 1, index / NumIndices);
}

template <std::size_t NumIndices, std::size_t... Dimensions>
struct MakeVariantTuple<NumIndices, absl::index_sequence<Dimensions...>> {
  using VariantTuple =
      std::tuple<always_t<VariantOfAlternatives<NumIndices>, Dimensions>...>;

  static VariantTuple Run(int index) {
    return std::make_tuple(MakeVariant<NumIndices>(Dimensions, index)...);
  }
};

constexpr std::size_t integral_pow(std::size_t base, std::size_t power) {
  return power == 0 ? 1 : base * integral_pow(base, power - 1);
}

template <std::size_t End, std::size_t I = 0>
struct VisitTestBody {
  template <class Vars, class State>
  static bool Run(Vars& vars, State& state) {
    if (state.KeepRunning()) {
      absl::apply(VisitorApplier(), vars[I]);
      return VisitTestBody<End, I + 1>::Run(vars, state);
    }
    return false;
  }
};

template <std::size_t End>
struct VisitTestBody<End, End> {
  template <class Vars, class State>
  static bool Run(Vars& /*vars*/, State& /*state*/) {
    return true;
  }
};

// Visit operations where branch prediction is likely to give a boost.
template <std::size_t NumIndices, std::size_t NumDimensions = 1>
void BM_RedundantVisit(benchmark::State& state) {
  auto vars =
      MakeVariantTuple<NumIndices, absl::make_index_sequence<NumDimensions>>::
          Run(static_cast<std::size_t>(state.range(0)));

  for (auto _ : state) {  // NOLINT
    benchmark::DoNotOptimize(vars);
    absl::apply(VisitorApplier(), vars);
  }
}

// Visit operations where branch prediction is unlikely to give a boost.
template <std::size_t NumIndices, std::size_t NumDimensions = 1>
void BM_Visit(benchmark::State& state) {
  constexpr std::size_t num_possibilities =
      integral_pow(NumIndices, NumDimensions);

  using VariantTupleMaker =
      MakeVariantTuple<NumIndices, absl::make_index_sequence<NumDimensions>>;
  using Tuple = typename VariantTupleMaker::VariantTuple;

  Tuple vars[num_possibilities];
  for (std::size_t i = 0; i < num_possibilities; ++i)
    vars[i] = VariantTupleMaker::Run(i);

  while (VisitTestBody<num_possibilities>::Run(vars, state)) {
  }
}

// Visitation
// Each visit is on a different variant with a different active alternative)

// Unary visit
BENCHMARK_TEMPLATE(BM_Visit, 1);
BENCHMARK_TEMPLATE(BM_Visit, 2);
BENCHMARK_TEMPLATE(BM_Visit, 3);
BENCHMARK_TEMPLATE(BM_Visit, 4);
BENCHMARK_TEMPLATE(BM_Visit, 5);
BENCHMARK_TEMPLATE(BM_Visit, 6);
BENCHMARK_TEMPLATE(BM_Visit, 7);
BENCHMARK_TEMPLATE(BM_Visit, 8);
BENCHMARK_TEMPLATE(BM_Visit, 16);
BENCHMARK_TEMPLATE(BM_Visit, 32);
BENCHMARK_TEMPLATE(BM_Visit, 64);

// Binary visit
BENCHMARK_TEMPLATE(BM_Visit, 1, 2);
BENCHMARK_TEMPLATE(BM_Visit, 2, 2);
BENCHMARK_TEMPLATE(BM_Visit, 3, 2);
BENCHMARK_TEMPLATE(BM_Visit, 4, 2);
BENCHMARK_TEMPLATE(BM_Visit, 5, 2);

// Ternary visit
BENCHMARK_TEMPLATE(BM_Visit, 1, 3);
BENCHMARK_TEMPLATE(BM_Visit, 2, 3);
BENCHMARK_TEMPLATE(BM_Visit, 3, 3);

// Quaternary visit
BENCHMARK_TEMPLATE(BM_Visit, 1, 4);
BENCHMARK_TEMPLATE(BM_Visit, 2, 4);

// Redundant Visitation
// Each visit consistently has the same alternative active

// Unary visit
BENCHMARK_TEMPLATE(BM_RedundantVisit, 1)->Arg(0);
BENCHMARK_TEMPLATE(BM_RedundantVisit, 2)->DenseRange(0, 1);
BENCHMARK_TEMPLATE(BM_RedundantVisit, 8)->DenseRange(0, 7);

// Binary visit
BENCHMARK_TEMPLATE(BM_RedundantVisit, 1, 2)->Arg(0);
BENCHMARK_TEMPLATE(BM_RedundantVisit, 2, 2)
    ->DenseRange(0, integral_pow(2, 2) - 1);
BENCHMARK_TEMPLATE(BM_RedundantVisit, 4, 2)
    ->DenseRange(0, integral_pow(4, 2) - 1);

}  // namespace
}  // namespace absl
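A usage note (illustrative, not part of the commit): since the Bazel target above links "@com_github_google_benchmark//:benchmark_main", the benchmarks can be run directly, for example with `bazel run -c opt //absl/types:variant_benchmark`, to measure visitation cost across the different alternative counts and arities registered here.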