tvl-depot/absl/container/inlined_vector_benchmark.cc
Abseil Team 361cb8a9db Export of internal Abseil changes.
--
4a21ad4ffa957d28b770de8717289fab7410f567 by Gennadiy Rozental <rogeeff@google.com>:

Internal cleanup

PiperOrigin-RevId: 252366381

--
b6b0f25439549c54f1537a16625be1fecd3c7d8c by Xiaoyi Zhang <zhangxy@google.com>:

Fix the C4245 compiler warning in Visual Studio. This allows using Abseil headers
in code that requires strict warning settings.
This is an import of https://github.com/abseil/abseil-cpp/pull/321.

PiperOrigin-RevId: 252101240

--
0543b7861b658a5a665298e1d868e29968ff7b27 by CJ Johnson <johnsoncj@google.com>:

Adds new benchmarks for the constructors of InlinedVector

PiperOrigin-RevId: 251905349

--
c65a08507917e9f8f6450b8beb235fe1426d7954 by CJ Johnson <johnsoncj@google.com>:

Updates the InlinedVector BatchedBenchmark abstractions to 1) provide the index of the instance back to the prepare and test functions so that callers may perform extra work on local state with a unique per-instance ID and 2) reduce the number of manually written calls to BENCHMARK_TEMPLATE.

PiperOrigin-RevId: 251895546

--
99a1ae2d786b80096172f6e018711e15c0c750b9 by Samuel Benzaquen <sbenza@google.com>:

Fix an ambiguous construction problem in absl::variant<> to bring it in line
with std::variant.
ImaginaryFun was hiding duplicate overloads instead of causing ambiguity. Add a
second, unique argument to make sure all overloads exist in the final overload
set.
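
For illustration (a sketch of the std::variant behavior being matched, not
code from this change): when two alternatives are duplicates, the converting
constructor has no unique best overload, so plain-value construction is
ill-formed and in_place_index must be used instead:

  std::variant<int, int> v(std::in_place_index<0>, 42);  // OK
  std::variant<int, int> w = 42;  // ill-formed: no unique alternative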

PiperOrigin-RevId: 251860694

--
b54d0a12673be6ebb6e77e24a556ce9b758b3a7e by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251739183

--
f51b115e0dc3fc9a9c9c20b33a1f27027a700d48 by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251686812

--
30e868049282dc6a6fc77d923ca7d2a5d35a1658 by Xiaoyi Zhang <zhangxy@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 251652119
GitOrigin-RevId: 4a21ad4ffa957d28b770de8717289fab7410f567
Change-Id: I7171cb613793fa90e0eb0143b65ec8264a2a84db
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <array>
#include <memory>
#include <string>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
namespace {
void BM_InlinedVectorFill(benchmark::State& state) {
absl::InlinedVector<int, 8> v;
int val = 10;
for (auto _ : state) {
benchmark::DoNotOptimize(v);
v.push_back(val);
}
}
BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
void BM_InlinedVectorFillRange(benchmark::State& state) {
const int len = state.range(0);
std::unique_ptr<int[]> ia(new int[len]);
for (int i = 0; i < len; i++) {
ia[i] = i;
}
auto* from = ia.get();
auto* to = from + len;
for (auto _ : state) {
benchmark::DoNotOptimize(from);
benchmark::DoNotOptimize(to);
absl::InlinedVector<int, 8> v(from, to);
benchmark::DoNotOptimize(v);
}
}
BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
void BM_StdVectorFill(benchmark::State& state) {
std::vector<int> v;
int val = 10;
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(val);
v.push_back(val);
}
}
BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
// The purpose of the next two benchmarks is to verify that
// absl::InlinedVector is efficient when moving is more efficient than
// copying. To do so, we use strings that are larger than the short
// string optimization.
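// Returns true if the characters of `s` are stored inside the std::string
// object itself (i.e. the short string optimization is in effect). Moving a
// heap-allocated string typically just transfers ownership of the buffer, so
// its data() pointer survives the move, whereas an SSO string's bytes must be
// copied into the new object and the pointer changes.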
bool StringRepresentedInline(std::string s) {
const char* chars = s.data();
std::string s1 = std::move(s);
return s1.data() != chars;
}
int GetNonShortStringOptimizationSize() {
for (int i = 24; i <= 192; i *= 2) {
if (!StringRepresentedInline(std::string(i, 'A'))) {
return i;
}
}
ABSL_RAW_LOG(
FATAL,
"Failed to find a std::string larger than the short std::string optimization");
return -1;
}
void BM_InlinedVectorFillString(benchmark::State& state) {
const int len = state.range(0);
const int no_sso = GetNonShortStringOptimizationSize();
std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
std::string(no_sso, 'C'), std::string(no_sso, 'D')};
for (auto _ : state) {
absl::InlinedVector<std::string, 8> v;
for (int i = 0; i < len; i++) {
v.push_back(strings[i & 3]);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);
void BM_StdVectorFillString(benchmark::State& state) {
const int len = state.range(0);
const int no_sso = GetNonShortStringOptimizationSize();
std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
std::string(no_sso, 'C'), std::string(no_sso, 'D')};
for (auto _ : state) {
std::vector<std::string> v;
for (int i = 0; i < len; i++) {
v.push_back(strings[i & 3]);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);
struct Buffer { // some arbitrary structure for benchmarking.
char* base;
int length;
int capacity;
void* user_data;
};
void BM_InlinedVectorAssignments(benchmark::State& state) {
const int len = state.range(0);
using BufferVec = absl::InlinedVector<Buffer, 2>;
BufferVec src;
src.resize(len);
BufferVec dst;
for (auto _ : state) {
benchmark::DoNotOptimize(dst);
benchmark::DoNotOptimize(src);
dst = src;
}
}
BENCHMARK(BM_InlinedVectorAssignments)
->Arg(0)
->Arg(1)
->Arg(2)
->Arg(3)
->Arg(4)
->Arg(20);
void BM_CreateFromContainer(benchmark::State& state) {
for (auto _ : state) {
absl::InlinedVector<int, 4> src{1, 2, 3};
benchmark::DoNotOptimize(src);
absl::InlinedVector<int, 4> dst(std::move(src));
benchmark::DoNotOptimize(dst);
}
}
BENCHMARK(BM_CreateFromContainer);
struct LargeCopyableOnly {
LargeCopyableOnly() : d(1024, 17) {}
LargeCopyableOnly(const LargeCopyableOnly& o) = default;
LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;
std::vector<int> d;
};
struct LargeCopyableSwappable {
LargeCopyableSwappable() : d(1024, 17) {}
LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;
LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
using std::swap;
swap(*this, o);
return *this;
}
friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
using std::swap;
swap(a.d, b.d);
}
std::vector<int> d;
};
struct LargeCopyableMovable {
LargeCopyableMovable() : d(1024, 17) {}
// Use implicitly defined copy and move.
std::vector<int> d;
};
struct LargeCopyableMovableSwappable {
LargeCopyableMovableSwappable() : d(1024, 17) {}
LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
default;
LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;
LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
using std::swap;
swap(*this, o);
return *this;
}
LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
default;
friend void swap(LargeCopyableMovableSwappable& a,
LargeCopyableMovableSwappable& b) {
using std::swap;
swap(a.d, b.d);
}
std::vector<int> d;
};
template <typename ElementType>
void BM_SwapElements(benchmark::State& state) {
const int len = state.range(0);
using Vec = absl::InlinedVector<ElementType, 32>;
Vec a(len);
Vec b;
for (auto _ : state) {
using std::swap;
benchmark::DoNotOptimize(a);
benchmark::DoNotOptimize(b);
swap(a, b);
}
}
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
->Range(0, 1024);
// The following benchmark is meant to track the efficiency of the vector size
// as a function of stored type via the benchmark label. It is not meant to
// measure the performance of the sizeof operator itself. The loop is a dummy
// operation that exists only to satisfy the requirement that the benchmark runs.
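// (The reported size grows with the inline capacity N because storage for N
// elements is embedded directly in the InlinedVector object, alongside its
// size and allocation bookkeeping.)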
template <typename VecType>
void BM_Sizeof(benchmark::State& state) {
int size = 0;
for (auto _ : state) {
VecType vec;
size = sizeof(vec);
}
state.SetLabel(absl::StrCat("sz=", size));
}
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
void BM_InlinedVectorIndexInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v[4]);
}
}
BENCHMARK(BM_InlinedVectorIndexInlined);
void BM_InlinedVectorIndexExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v[4]);
}
}
BENCHMARK(BM_InlinedVectorIndexExternal);
void BM_StdVectorIndex(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v[4]);
}
}
BENCHMARK(BM_StdVectorIndex);
void BM_InlinedVectorDataInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.data());
}
}
BENCHMARK(BM_InlinedVectorDataInlined);
void BM_InlinedVectorDataExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.data());
}
state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorDataExternal);
void BM_StdVectorData(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.data());
}
state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorData);
void BM_InlinedVectorSizeInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.size());
}
}
BENCHMARK(BM_InlinedVectorSizeInlined);
void BM_InlinedVectorSizeExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.size());
}
}
BENCHMARK(BM_InlinedVectorSizeExternal);
void BM_StdVectorSize(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.size());
}
}
BENCHMARK(BM_StdVectorSize);
void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.empty());
}
}
BENCHMARK(BM_InlinedVectorEmptyInlined);
void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.empty());
}
}
BENCHMARK(BM_InlinedVectorEmptyExternal);
void BM_StdVectorEmpty(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
benchmark::DoNotOptimize(v);
benchmark::DoNotOptimize(v.empty());
}
}
BENCHMARK(BM_StdVectorEmpty);
constexpr size_t kInlinedCapacity = 4;
constexpr size_t kLargeSize = kInlinedCapacity * 2;
constexpr size_t kSmallSize = kInlinedCapacity / 2;
constexpr size_t kBatchSize = 100;
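// kSmallSize fits within the inline capacity, so those instantiations exercise
// the inlined representation, while kLargeSize exceeds it and forces a heap
// allocation; each benchmark below is instantiated for both sizes via
// ABSL_INTERNAL_BENCHMARK_ONE_SIZE.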
#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize); \
BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
template <typename T>
using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
struct TrivialType {
size_t val;
};
class NontrivialType {
public:
ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
benchmark::DoNotOptimize(*this);
}
ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
: val_(other.val_) {
benchmark::DoNotOptimize(*this);
}
ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
const NontrivialType& other) {
val_ = other.val_;
benchmark::DoNotOptimize(*this);
return *this;
}
ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
benchmark::DoNotOptimize(*this);
}
private:
size_t val_;
};
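// Runs the benchmark over batches of kBatchSize vectors: for each batch,
// `prepare_vec` is invoked on every slot with timing paused, and `test_vec` is
// then invoked on every slot with timing resumed. Both callbacks receive a
// pointer to the slot and the slot's index within the batch.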
template <typename T, typename PrepareVecFn, typename TestVecFn>
void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
TestVecFn test_vec) {
std::array<InlVec<T>, kBatchSize> vector_batch{};
while (state.KeepRunningBatch(kBatchSize)) {
// Prepare batch
state.PauseTiming();
for (size_t i = 0; i < kBatchSize; ++i) {
prepare_vec(vector_batch.data() + i, i);
}
benchmark::DoNotOptimize(vector_batch);
state.ResumeTiming();
// Test batch
for (size_t i = 0; i < kBatchSize; ++i) {
test_vec(vector_batch.data() + i, i);
}
}
}
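// The constructor benchmarks below time only construction: `prepare_vec`
// destroys the previously constructed vector outside the timed region, and
// `test_vec` placement-news a fresh vector into the same storage inside it.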
template <typename T, size_t ToSize>
void BM_ConstructFromSize(benchmark::State& state) {
using VecT = InlVec<T>;
auto size = ToSize;
BatchedBenchmark<T>(
state,
/* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
/* test_vec = */
[&](void* ptr, size_t) {
benchmark::DoNotOptimize(size);
::new (ptr) VecT(size);
});
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
template <typename T, size_t ToSize>
void BM_ConstructFromSizeRef(benchmark::State& state) {
using VecT = InlVec<T>;
auto size = ToSize;
auto ref = T();
BatchedBenchmark<T>(
state,
/* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
/* test_vec = */
[&](void* ptr, size_t) {
benchmark::DoNotOptimize(size);
benchmark::DoNotOptimize(ref);
::new (ptr) VecT(size, ref);
});
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
template <typename T, size_t ToSize>
void BM_ConstructFromRange(benchmark::State& state) {
using VecT = InlVec<T>;
std::array<T, ToSize> arr{};
BatchedBenchmark<T>(
state,
/* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
/* test_vec = */
[&](void* ptr, size_t) {
benchmark::DoNotOptimize(arr);
::new (ptr) VecT(arr.begin(), arr.end());
});
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
template <typename T, size_t ToSize>
void BM_ConstructFromCopy(benchmark::State& state) {
using VecT = InlVec<T>;
VecT other_vec(ToSize);
BatchedBenchmark<T>(
state,
/* prepare_vec = */
[](InlVec<T>* vec, size_t) { vec->~VecT(); },
/* test_vec = */
[&](void* ptr, size_t) {
benchmark::DoNotOptimize(other_vec);
::new (ptr) VecT(other_vec);
});
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
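// Uses the per-instance batch index so that each timed move-construction
// steals from its own freshly resized source vector.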
template <typename T, size_t ToSize>
void BM_ConstructFromMove(benchmark::State& state) {
using VecT = InlVec<T>;
std::array<VecT, kBatchSize> vector_batch{};
BatchedBenchmark<T>(
state,
/* prepare_vec = */
[&](InlVec<T>* vec, size_t i) {
vector_batch[i].clear();
vector_batch[i].resize(ToSize);
vec->~VecT();
},
/* test_vec = */
[&](void* ptr, size_t i) {
benchmark::DoNotOptimize(vector_batch[i]);
::new (ptr) VecT(std::move(vector_batch[i]));
});
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
template <typename T, size_t FromSize>
void BM_Clear(benchmark::State& state) {
BatchedBenchmark<T>(
state,
/* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
/* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
} // namespace