Export of internal Abseil changes.
--
8b7c3bc2fb69608e9b2389b1be0b0de840a4c59d by Derek Mauro <dmauro@google.com>:

Set correct flags for clang-cl. https://github.com/abseil/abseil-cpp/pull/278

clang-cl produces binaries with the MSVC ABI and aims to be as flag-compatible
with pure MSVC as possible, which leads to all sorts of special cases:

* clang-cl aliases /Wall to clang's -Weverything, which is far too verbose, so
  it needs /W3 like pure MSVC.
* clang-cl only understands GCC-style warning flags (-W[no-]foo) and silently
  drops MSVC-style warning flags (/wd[num]).
* clang-cl needs the MSVC define flags since it consumes the same header files
  as pure MSVC.

CMake sets CMAKE_CXX_COMPILER_ID to Clang when clang-cl is detected, so an
extra if (MSVC) is needed to tell the two apart. We are not doing clang-cl
specialization in Bazel, as there is currently no reliable way to detect
clang-cl in Bazel.

Other changes: add an ABSL_ prefix to variable names to avoid name collisions
in CMake.

PiperOrigin-RevId: 239841297

--
add96c3fc067d5c7b6f016d2ba74725a443a185e by CJ Johnson <johnsoncj@google.com>:

Eventually Storage will need to refer to the type `absl::InlinedVector<...>*`.
This can be done via a forward declaration. However, doing so would move the
defaulted allocator template parameter to the forward declaration and thus
into an internal file. Instead of doing that, this change gives Storage access
to the template and its parameters so the complete type can be formed without
including it.

PiperOrigin-RevId: 239811298

--
b5f5279f1b13b09cae5c745597d64ea1efab146b by CJ Johnson <johnsoncj@google.com>:

Simplify and clean up the benchmark tests for InlinedVector.

PiperOrigin-RevId: 239805767

--
f5991e51b43b13a0ae95025474071f5039a33d27 by Matt Calabrese <calabrese@google.com>:

Update the internal-only IsSwappable traits to be nested inside of namespace
absl so that the script that adds inline namespaces for LTS releases works
with the implementation.

PiperOrigin-RevId: 239622024

--
d1cb234dc5706f033ad56f4eb16d94ac5da80d52 by Abseil Team <absl-team@google.com>:

Mutex: fix tsan annotations.

This fixes two bugs:
1. We call cond directly in Mutex::AwaitCommon without using
   EvalConditionAnnotated. As a result we call into user code ignoring
   synchronization, miss synchronization, and report false positives later.
   Use EvalConditionAnnotated to call cond as we should.
2. We call the Mutex invariant ignoring synchronization. The result is the
   same: we miss synchronization and report false positive races later.
   Reuse EvalConditionAnnotated to call the mutex invariant too.

PiperOrigin-RevId: 239583878

--
52295e4922a9b408fa0dd03d27bc91ccc6645cd7 by Abseil Team <absl-team@google.com>:

Clarify how to obtain the same behavior as std::unordered_map::erase if need
be.

PiperOrigin-RevId: 239549513

--
6e76e68ed084fd1247981dbb92677ce8e563b0ec by Jon Cohen <cohenjon@google.com>:

Avoid the -S -B form of `cmake`, since it is only supported starting with
CMake 3.13.

PiperOrigin-RevId: 239473143

GitOrigin-RevId: 8b7c3bc2fb69608e9b2389b1be0b0de840a4c59d
Change-Id: Ib6d356fa1a7435260273df991e65df4149bd5861
parent 253eb74164
commit eab2078b53
9 changed files with 208 additions and 153 deletions
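The clang-cl handling described in the first change boils down to branching on both the compiler ID and the MSVC front end in CMake. A minimal sketch of that pattern, with illustrative flag values and a placeholder variable name (ABSL_TEST_COPTS is not the exact name used in Abseil's build files):

# Sketch only: clang-cl reports CMAKE_CXX_COMPILER_ID as "Clang" but also sets
# MSVC, because it drives an MSVC-compatible front end. Branch on both.
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  if(MSVC)
    # clang-cl: use /W3 (its /Wall maps to -Weverything), keep MSVC-style
    # defines, and express warning selections GCC-style, since /wd<num> is
    # silently dropped.
    set(ABSL_TEST_COPTS "/W3" "-Wno-unused-parameter" "/DNOMINMAX")
  else()
    # Plain clang with the GNU-style driver.
    set(ABSL_TEST_COPTS "-Wall" "-Wextra")
  endif()
elseif(MSVC)
  # Pure MSVC: /wd<num> warning suppression works here.
  set(ABSL_TEST_COPTS "/W3" "/wd4005" "/DNOMINMAX")
endif()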
@@ -30,27 +30,32 @@ project_dir="${absl_dir}"/CMake/install_test_project
 project_build_dir=/buildfs/project-build
 install_dir="${project_build_dir}"/install
 
+mkdir -p "${absl_build_dir}"
+mkdir -p "${project_build_dir}"
+mkdir -p "${install_dir}"
+
 install_absl() {
+  pushd "${absl_build_dir}"
   if [[ "${#}" -eq 1 ]]; then
-    cmake -DCMAKE_INSTALL_PREFIX="${1}" -B "${absl_build_dir}" -S "${absl_dir}"
+    cmake -DCMAKE_INSTALL_PREFIX="${1}" "${absl_dir}"
   else
-    cmake -B "${absl_build_dir}" -S "${absl_dir}"
+    cmake "${absl_dir}"
   fi
-  cmake --build "${absl_build_dir}" --target install -- -j
+  cmake --build . --target install -- -j
+  popd
 }
 
 uninstall_absl() {
   xargs rm < "${absl_build_dir}"/install_manifest.txt
   rm -rf "${absl_build_dir}"
+  mkdir -p "${absl_build_dir}"
 }
 
 # Test build, install, and link against installed abseil
 install_absl "${install_dir}"
-cmake \
-  -H"${project_dir}" \
-  -B"${project_build_dir}" \
-  -DCMAKE_PREFIX_PATH="${install_dir}"
-cmake --build "${project_build_dir}" --target simple
+pushd "${project_build_dir}"
+cmake "${project_dir}" -DCMAKE_PREFIX_PATH="${install_dir}"
+cmake --build . --target simple
 
 output="$(${project_build_dir}/simple "printme" 2>&1)"
 if [[ "${output}" != *"Arg 1: printme"* ]]; then
@@ -219,8 +219,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
   // Erases the element at `position` of the `flat_hash_map`, returning
   // `void`.
   //
-  // NOTE: this return behavior is different than that of STL containers in
-  // general and `std::unordered_map` in particular.
+  // NOTE: returning `void` in this case is different than that of STL
+  // containers in general and `std::unordered_map` in particular (which
+  // return an iterator to the element following the erased element). If that
+  // iterator is needed, simply post increment the iterator:
+  //
+  //   map.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
@@ -212,8 +212,12 @@ class flat_hash_set
   // Erases the element at `position` of the `flat_hash_set`, returning
   // `void`.
   //
-  // NOTE: this return behavior is different than that of STL containers in
-  // general and `std::unordered_map` in particular.
+  // NOTE: returning `void` in this case is different than that of STL
+  // containers in general and `std::unordered_set` in particular (which
+  // return an iterator to the element following the erased element). If that
+  // iterator is needed, simply post increment the iterator:
+  //
+  //   set.erase(it++);
   //
   // iterator erase(const_iterator first, const_iterator last):
   //
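The NOTE added to both headers above describes the post-increment idiom for erasing while iterating. A small, self-contained usage sketch (hypothetical example code, not part of this commit):

#include <iostream>
#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<int> s = {1, 2, 3, 4, 5};
  // erase(iterator) returns void here, so post-increment the iterator to
  // keep a valid position for the next element while the current one goes.
  for (auto it = s.begin(); it != s.end();) {
    if (*it % 2 == 0) {
      s.erase(it++);
    } else {
      ++it;
    }
  }
  std::cout << s.size() << "\n";  // the three odd elements remain
}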
@@ -66,7 +66,10 @@ namespace absl {
 // designed to cover the same API footprint as covered by `std::vector`.
 template <typename T, size_t N, typename A = std::allocator<T>>
 class InlinedVector {
-  using Storage = inlined_vector_internal::InlinedVectorStorage<T, N, A>;
+  static_assert(
+      N > 0, "InlinedVector cannot be instantiated with `0` inlined elements.");
+
+  using Storage = inlined_vector_internal::Storage<InlinedVector>;
   using Tag = typename Storage::Tag;
   using AllocatorAndTag = typename Storage::AllocatorAndTag;
   using Allocation = typename Storage::Allocation;
@@ -283,8 +286,7 @@ class InlinedVector {
   // will no longer be inlined and `capacity()` will equal its capacity on the
   // allocated heap.
   size_type capacity() const noexcept {
-    return allocated() ? allocation().capacity()
-                       : Storage::GetInlinedCapacity();
+    return allocated() ? allocation().capacity() : static_cast<size_type>(N);
   }
 
   // `InlinedVector::data()`
@@ -802,19 +804,19 @@ class InlinedVector {
   // `InlinedVector::shrink_to_fit()`
   //
   // Reduces memory usage by freeing unused memory. After this call, calls to
-  // `capacity()` will be equal to `max(Storage::GetInlinedCapacity(), size())`.
+  // `capacity()` will be equal to `max(N, size())`.
   //
-  // If `size() <= Storage::GetInlinedCapacity()` and the elements are currently
-  // stored on the heap, they will be moved to the inlined storage and the heap
-  // memory will be deallocated.
+  // If `size() <= N` and the elements are currently stored on the heap, they
+  // will be moved to the inlined storage and the heap memory will be
+  // deallocated.
   //
-  // If `size() > Storage::GetInlinedCapacity()` and `size() < capacity()` the
-  // elements will be moved to a smaller heap allocation.
+  // If `size() > N` and `size() < capacity()` the elements will be moved to a
+  // smaller heap allocation.
   void shrink_to_fit() {
     const auto s = size();
     if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;
 
-    if (s <= Storage::GetInlinedCapacity()) {
+    if (s <= N) {
       // Move the elements to the inlined storage.
       // We have to do this using a temporary, because `inlined_storage` and
       // `allocation_storage` are in a union field.
@@ -943,7 +945,7 @@ class InlinedVector {
     const size_type s = size();
     assert(s <= capacity());
 
-    size_type target = (std::max)(Storage::GetInlinedCapacity(), s + delta);
+    size_type target = (std::max)(N, s + delta);
 
     // Compute new capacity by repeatedly doubling current capacity
     // TODO(psrc): Check and avoid overflow?
@@ -1046,7 +1048,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n);
@@ -1058,7 +1060,7 @@ class InlinedVector {
   }
 
   void InitAssign(size_type n, const_reference v) {
-    if (n > Storage::GetInlinedCapacity()) {
+    if (n > N) {
       Allocation new_allocation(allocator(), n);
       init_allocation(new_allocation);
       UninitializedFill(allocated_space(), allocated_space() + n, v);
@@ -23,17 +23,13 @@
 
 namespace {
 
-using IntVec = absl::InlinedVector<int, 8>;
-
 void BM_InlinedVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  int val = 10;
   for (auto _ : state) {
-    IntVec v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
 
@@ -43,23 +39,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
   for (int i = 0; i < len; i++) {
     ia[i] = i;
   }
+  auto* from = ia.get();
+  auto* to = from + len;
   for (auto _ : state) {
-    IntVec v(ia.get(), ia.get() + len);
+    benchmark::DoNotOptimize(from);
+    benchmark::DoNotOptimize(to);
+    absl::InlinedVector<int, 8> v(from, to);
     benchmark::DoNotOptimize(v);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
 
 void BM_StdVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  std::vector<int> v;
+  int val = 10;
   for (auto _ : state) {
-    std::vector<int> v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(val);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 
@@ -124,7 +122,7 @@ struct Buffer {  // some arbitrary structure for benchmarking.
   void* user_data;
 };
 
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
   const int len = state.range(0);
   using BufferVec = absl::InlinedVector<Buffer, 2>;
 
@@ -133,18 +131,25 @@ void BM_InlinedVectorTenAssignments(benchmark::State& state) {
 
   BufferVec dst;
   for (auto _ : state) {
-    for (int i = 0; i < 10; ++i) {
-      dst = src;
-    }
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
   }
 }
-BENCHMARK(BM_InlinedVectorTenAssignments)
-    ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
 
 void BM_CreateFromContainer(benchmark::State& state) {
   for (auto _ : state) {
-    absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
-    benchmark::DoNotOptimize(x);
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
   }
 }
 BENCHMARK(BM_CreateFromContainer);
@@ -214,6 +219,8 @@ void BM_SwapElements(benchmark::State& state) {
   Vec b;
   for (auto _ : state) {
     using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
     swap(a, b);
   }
 }
@@ -259,60 +266,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
 void BM_InlinedVectorIndexInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexInlined);
 
 void BM_InlinedVectorIndexExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexExternal);
 
 void BM_StdVectorIndex(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorIndex);
 
-#define UNROLL_2(x) \
-  benchmark::DoNotOptimize(x); \
-  benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
 void BM_InlinedVectorDataInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorDataInlined);
 
 void BM_InlinedVectorDataExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -321,7 +312,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
 void BM_StdVectorData(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -330,54 +322,54 @@ BENCHMARK(BM_StdVectorData);
 void BM_InlinedVectorSizeInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeInlined);
 
 void BM_InlinedVectorSizeExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeExternal);
 
 void BM_StdVectorSize(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorSize);
 
 void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyInlined);
 
 void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyExternal);
 
 void BM_StdVectorEmpty(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorEmpty);
 
@@ -24,11 +24,12 @@
 namespace absl {
 namespace inlined_vector_internal {
 
-template <typename T, size_t N, typename A>
-class InlinedVectorStorage {
-  static_assert(
-      N > 0, "InlinedVector cannot be instantiated with `0` inline elements.");
+template <typename InlinedVector>
+class Storage;
 
+template <template <typename, size_t, typename> class InlinedVector, typename T,
+          size_t N, typename A>
+class Storage<InlinedVector<T, N, A>> {
  public:
   using allocator_type = A;
   using value_type = typename allocator_type::value_type;
@@ -44,12 +45,7 @@ class InlinedVectorStorage {
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
-  constexpr static size_type GetInlinedCapacity() {
-    return static_cast<size_type>(N);
-  }
-
-  explicit InlinedVectorStorage(const allocator_type& a)
-      : allocator_and_tag_(a) {}
+  explicit Storage(const allocator_type& a) : allocator_and_tag_(a) {}
 
   // TODO(johnsoncj): Make the below types and members private after migration
 
@@ -845,6 +845,25 @@ TEST(Table, Erase) {
   EXPECT_TRUE(t.find(0) == t.end());
 }
 
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
 // Collect N bad keys by following algorithm:
 // 1. Create an empty table and reserve it to 2 * N.
 // 2. Insert N random elements.
@@ -483,16 +483,17 @@ inline void AssertHashEnabled() {
 
 }  // namespace type_traits_internal
 
-}  // namespace absl
-
 // An internal namespace that is required to implement the C++17 swap traits.
-//
-// NOTE: This is its own top-level namespace to avoid subtleties due to
-// functions named "swap" that may appear in the absl namespace.
-namespace absl_internal_swap {
+// It is not further nested in type_traits_internal to avoid long symbol names.
+namespace swap_internal {
 
+// Necessary for the traits.
 using std::swap;
 
+// This declaration prevents global `swap` and `absl::swap` overloads from being
+// considered unless ADL picks them up.
+void swap();
+
 template <class T>
 using IsSwappableImpl = decltype(swap(std::declval<T&>(), std::declval<T&>()));
 
|
||||||
|
|
||||||
// Swap()
|
// Swap()
|
||||||
//
|
//
|
||||||
// Performs the swap idiom from a namespace with no additional `swap` overloads.
|
// Performs the swap idiom from a namespace where valid candidates may only be
|
||||||
|
// found in `std` or via ADL.
|
||||||
template <class T, absl::enable_if_t<IsSwappable<T>::value, int> = 0>
|
template <class T, absl::enable_if_t<IsSwappable<T>::value, int> = 0>
|
||||||
void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable<T>::value) {
|
void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable<T>::value) {
|
||||||
swap(lhs, rhs);
|
swap(lhs, rhs);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace absl_internal_swap
|
|
||||||
|
|
||||||
namespace absl {
|
|
||||||
namespace type_traits_internal {
|
|
||||||
|
|
||||||
// Make the swap-related traits/function accessible from this namespace.
|
|
||||||
using absl_internal_swap::IsNothrowSwappable;
|
|
||||||
using absl_internal_swap::IsSwappable;
|
|
||||||
using absl_internal_swap::Swap;
|
|
||||||
|
|
||||||
// StdSwapIsUnconstrained
|
// StdSwapIsUnconstrained
|
||||||
//
|
//
|
||||||
// Some standard library implementations are broken in that they do not
|
// Some standard library implementations are broken in that they do not
|
||||||
|
@ -543,6 +535,16 @@ using absl_internal_swap::Swap;
|
||||||
// one of those implementations.
|
// one of those implementations.
|
||||||
using StdSwapIsUnconstrained = IsSwappable<void()>;
|
using StdSwapIsUnconstrained = IsSwappable<void()>;
|
||||||
|
|
||||||
|
} // namespace swap_internal
|
||||||
|
|
||||||
|
namespace type_traits_internal {
|
||||||
|
|
||||||
|
// Make the swap-related traits/function accessible from this namespace.
|
||||||
|
using swap_internal::IsNothrowSwappable;
|
||||||
|
using swap_internal::IsSwappable;
|
||||||
|
using swap_internal::Swap;
|
||||||
|
using swap_internal::StdSwapIsUnconstrained;
|
||||||
|
|
||||||
} // namespace type_traits_internal
|
} // namespace type_traits_internal
|
||||||
} // namespace absl
|
} // namespace absl
|
||||||
|
|
||||||
|
|
|
@@ -118,6 +118,10 @@ ABSL_CONST_INIT absl::base_internal::AtomicHook<
 
 }  // namespace
 
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+                                          bool locking, bool trylock,
+                                          bool read_lock);
+
 void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
   submit_profile_data.Store(fn);
 }
@@ -233,15 +237,14 @@ enum {  // Mutex and CondVar events passed as "ev" to PostSynchEvent
   SYNCH_EV_SIGNALALL,
 };
 
 enum {  // Event flags
   SYNCH_F_R = 0x01,  // reader event
   SYNCH_F_LCK = 0x02,  // PostSynchEvent called with mutex held
-  SYNCH_F_ACQ = 0x04,  // event is an acquire
+  SYNCH_F_TRY = 0x04,  // TryLock or ReaderTryLock
+  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock
 
   SYNCH_F_LCK_W = SYNCH_F_LCK,
   SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
-  SYNCH_F_ACQ_W = SYNCH_F_ACQ,
-  SYNCH_F_ACQ_R = SYNCH_F_ACQ | SYNCH_F_R,
 };
 }  // anonymous namespace
 
@@ -250,20 +253,20 @@ static const struct {
   int flags;
   const char *msg;
 } event_properties[] = {
-    { SYNCH_F_LCK_W|SYNCH_F_ACQ_W, "TryLock succeeded " },
-    { 0, "TryLock failed " },
-    { SYNCH_F_LCK_R|SYNCH_F_ACQ_R, "ReaderTryLock succeeded " },
-    { 0, "ReaderTryLock failed " },
-    { SYNCH_F_ACQ_W, "Lock blocking " },
-    { SYNCH_F_LCK_W, "Lock returning " },
-    { SYNCH_F_ACQ_R, "ReaderLock blocking " },
-    { SYNCH_F_LCK_R, "ReaderLock returning " },
-    { SYNCH_F_LCK_W, "Unlock " },
-    { SYNCH_F_LCK_R, "ReaderUnlock " },
-    { 0, "Wait on " },
-    { 0, "Wait unblocked " },
-    { 0, "Signal on " },
-    { 0, "SignalAll on " },
+    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
+    {0, "TryLock failed "},
+    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
+    {0, "ReaderTryLock failed "},
+    {0, "Lock blocking "},
+    {SYNCH_F_LCK_W, "Lock returning "},
+    {0, "ReaderLock blocking "},
+    {SYNCH_F_LCK_R, "ReaderLock returning "},
+    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
+    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
+    {0, "Wait on "},
+    {0, "Wait unblocked "},
+    {0, "Signal on "},
+    {0, "SignalAll on "},
 };
 
 static absl::base_internal::SpinLock synch_event_mu(
@@ -415,9 +418,26 @@ static void PostSynchEvent(void *obj, int ev) {
     ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
                  (e == nullptr ? "" : e->name), buffer);
   }
-  if ((event_properties[ev].flags & SYNCH_F_LCK) != 0 && e != nullptr &&
-      e->invariant != nullptr) {
-    (*e->invariant)(e->arg);
+  const int flags = event_properties[ev].flags;
+  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
+    // Calling the invariant as is causes problems under ThreadSanitizer.
+    // We are currently inside of Mutex Lock/Unlock and are ignoring all
+    // memory accesses and synchronization. If the invariant transitively
+    // synchronizes something else and we ignore the synchronization, we will
+    // get false positive race reports later.
+    // Reuse EvalConditionAnnotated to properly call into user code.
+    struct local {
+      static bool pred(SynchEvent *ev) {
+        (*ev->invariant)(ev->arg);
+        return false;
+      }
+    };
+    Condition cond(&local::pred, e);
+    Mutex *mu = static_cast<Mutex *>(obj);
+    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
+    const bool trylock = (flags & SYNCH_F_TRY) != 0;
+    const bool read_lock = (flags & SYNCH_F_R) != 0;
+    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
   }
   UnrefSynchEvent(e);
 }
@@ -1553,7 +1573,7 @@ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
   this->LockSlowLoop(&waitp, flags);
   bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-             cond.Eval();
+             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
   ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
   return res;
 }
@@ -1731,12 +1751,17 @@ void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
 
 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
-                                          bool locking, Mutex::MuHow how) {
+                                          bool locking, bool trylock,
+                                          bool read_lock) {
   // Delicate annotation dance.
   // We are currently inside of read/write lock/unlock operation.
   // All memory accesses are ignored inside of mutex operations + for unlock
   // operation tsan considers that we've already released the mutex.
   bool res = false;
+#ifdef THREAD_SANITIZER
+  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
+  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
+#endif
   if (locking) {
     // For lock we pretend that we have finished the operation,
     // evaluate the predicate, then unlock the mutex and start locking it again
@@ -1744,24 +1769,26 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
     // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
     // will think the lock acquisition is recursive which will trigger
     // deadlock detector.
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+    ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
     res = cond->Eval();
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
+    // There is no "try" version of Unlock, so use flags instead of tryflags.
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
   } else {
     // Similarly, for unlock we pretend that we have unlocked the mutex,
     // lock the mutex, evaluate the predicate, and start unlocking it again
     // to match the annotation at the end of outer unlock operation.
-    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
-    ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+    ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
+    ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
+    ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
     res = cond->Eval();
-    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
   }
   // Prevent unused param warnings in non-TSAN builds.
   static_cast<void>(mu);
-  static_cast<void>(how);
+  static_cast<void>(trylock);
+  static_cast<void>(read_lock);
   return res;
 }
 
@@ -1807,7 +1834,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
           v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
                  how->fast_add,
           std::memory_order_acquire, std::memory_order_relaxed)) {
-    if (cond == nullptr || EvalConditionAnnotated(cond, this, true, how)) {
+    if (cond == nullptr ||
+        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
       return true;
     }
     unlock = true;
@@ -1825,7 +1853,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
   }
   this->LockSlowLoop(&waitp, flags);
   return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
-         cond == nullptr || EvalConditionAnnotated(cond, this, true, how);
+         cond == nullptr ||
+         EvalConditionAnnotated(cond, this, true, false, how == kShared);
 }
 
 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
|
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
|
||||||
|
@ -1881,7 +1910,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
|
||||||
waitp->how->fast_add,
|
waitp->how->fast_add,
|
||||||
std::memory_order_acquire, std::memory_order_relaxed)) {
|
std::memory_order_acquire, std::memory_order_relaxed)) {
|
||||||
if (waitp->cond == nullptr ||
|
if (waitp->cond == nullptr ||
|
||||||
EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
|
EvalConditionAnnotated(waitp->cond, this, true, false,
|
||||||
|
waitp->how == kShared)) {
|
||||||
break; // we timed out, or condition true, so return
|
break; // we timed out, or condition true, so return
|
||||||
}
|
}
|
||||||
this->UnlockSlow(waitp); // got lock but condition false
|
this->UnlockSlow(waitp); // got lock but condition false
|
||||||
|
@@ -1924,7 +1954,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
                                     std::memory_order_release,
                                     std::memory_order_relaxed));
       if (waitp->cond == nullptr ||
-          EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
+          EvalConditionAnnotated(waitp->cond, this, true, false,
+                                 waitp->how == kShared)) {
         break;  // we timed out, or condition true, so return
       }
       this->UnlockSlow(waitp);  // got lock but condition false