- 45b4111d940009bc7b3ebf621c9cb9293c60344a Use copied value i in lambda by Abseil Team <absl-team@google.com>
- b726b3102f8439b8480b6ff52fc3660162fa0bd9 Fix MSVC compiler warning on 32-bit platforms (GitHub iss... by Derek Mauro <dmauro@google.com>
- a8a29e636c85bd7d82c0cbc824a1c2e498764337 Explicitly forbid to specify the template parameter to Wr... by Abseil Team <absl-team@google.com>
- 566a1d903266fdbfdcf758401c356a0c6703422d Add missing license header to BUILD file. by Alex Strelnikov <strel@google.com>
- ef1c0642cde0bbad62bbb30715256b232a4ab817 Fix BUILD file header. by Alex Strelnikov <strel@google.com>
- b6e2cf00f808ee32b9eb7b3226af79d628742c20 Release GraphCycles microbenchmark. by Alex Strelnikov <strel@google.com>
- f592d78f549e7a242bf2bb4858a26645a655eac3 Release Mutex microbenchmarks. by Alex Strelnikov <strel@google.com>

GitOrigin-RevId: 45b4111d940009bc7b3ebf621c9cb9293c60344a
Change-Id: I82885ae176952a764574c6d4616e312a977407b2
This commit is contained in:
  parent 3e671c7821
  commit 014f02a3ec

11 changed files with 255 additions and 35 deletions
@@ -102,6 +102,7 @@ cc_library(
 cc_library(
     name = "base_internal",
     hdrs = [
+        "internal/hide_ptr.h",
         "internal/identity.h",
         "internal/inline_variable.h",
         "internal/invoke.h",
@@ -36,6 +36,7 @@ list(APPEND BASE_INTERNAL_HEADERS
   "internal/endian.h"
   "internal/exception_testing.h"
   "internal/exception_safety_testing.h"
+  "internal/hide_ptr.h"
   "internal/identity.h"
   "internal/invoke.h"
   "internal/inline_variable.h"
absl/base/internal/hide_ptr.h (new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
#define ABSL_BASE_INTERNAL_HIDE_PTR_H_

#include <cstdint>

namespace absl {
namespace base_internal {

// Arbitrary value with high bits set. Xor'ing with it is unlikely
// to map one valid pointer to another valid pointer.
constexpr uintptr_t HideMask() {
  static_assert(sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8,
                "uintptr_t must be 32 or 64 bits");
  return sizeof(uintptr_t) == 8 ? 0xF03A5F7BF03A5F7BULL : 0xF03A5F7BUL;
}

// Hide a pointer from the leak checker. For internal use only.
// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
// and all objects reachable from ptr to be ignored by the leak checker.
template <class T>
inline uintptr_t HidePtr(T* ptr) {
  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
}

// Return a pointer that has been hidden from the leak checker.
// For internal use only.
template <class T>
inline T* UnhidePtr(uintptr_t hidden) {
  return reinterpret_cast<T*>(hidden ^ HideMask());
}

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
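For orientation, a minimal sketch of how the new helpers round-trip a pointer. These functions are internal to Abseil, and the names Widget and Demo below are purely illustrative, not part of the commit:

#include <cstdint>

#include "absl/base/internal/hide_ptr.h"

// Illustrative only: store a pointer in its hidden (xor-masked) form so the
// leak checker does not treat it as a live reference, then recover it later.
struct Widget { int value; };

void Demo() {
  Widget* w = new Widget{42};
  uintptr_t hidden = absl::base_internal::HidePtr(w);               // hide
  Widget* back = absl::base_internal::UnhidePtr<Widget>(hidden);    // unhide
  // back == w, so the object can still be used and freed normally.
  delete back;
}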
@@ -40,7 +40,8 @@ namespace absl {
 // -----------------------------------------------------------------------------
 //
 // Adopts ownership from a raw pointer and transfers it to the returned
-// `std::unique_ptr`, whose type is deduced.
+// `std::unique_ptr`, whose type is deduced. DO NOT specify the template type T
+// when calling WrapUnique.
 //
 // Example:
 //   X* NewX(int, int);
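A short sketch of the usage the revised comment is asking for. The NewX factory comes from the doc comment's example; the surrounding code is illustrative, not from the commit:

#include <memory>

#include "absl/memory/memory.h"

struct X {
  X(int, int) {}
};
X* NewX(int a, int b) { return new X(a, b); }  // factory from the doc comment

void Example() {
  // Let the pointee type be deduced from the raw pointer:
  std::unique_ptr<X> x = absl::WrapUnique(NewX(1, 2));
  // Forbidden by this change: absl::WrapUnique<X>(NewX(1, 2)).
}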
@@ -1,3 +1,17 @@
+# Copyright 2018 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 load(
     "//absl:copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -13,10 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# -*- mode: python; -*-
-# Libraries in this low-level package may not depend on libraries in packages
-# that are not low level. For more information, including how to submit
-# changes to this file, see http://www/eng/howto/build-monitors.html

 load(
     "//absl:copts.bzl",
@@ -39,6 +39,7 @@ cc_library(
     ],
     deps = [
         "//absl/base",
+        "//absl/base:base_internal",
         "//absl/base:core_headers",
         "//absl/base:malloc_internal",
     ],
@@ -119,6 +120,20 @@ cc_test(
     ],
 )

+cc_test(
+    name = "graphcycles_benchmark",
+    srcs = ["internal/graphcycles_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    tags = [
+        "benchmark",
+    ],
+    deps = [
+        ":graphcycles_internal",
+        "//absl/base",
+        "@com_github_google_benchmark//:benchmark",
+    ],
+)
+
 cc_library(
     name = "thread_pool",
     testonly = 1,
@@ -148,6 +163,20 @@ cc_test(
     ],
 )

+cc_test(
+    name = "mutex_benchmark",
+    srcs = ["mutex_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":synchronization",
+        ":thread_pool",
+        "//absl/base",
+        "@com_github_google_benchmark//:benchmark",
+    ],
+)
+
 cc_test(
     name = "notification_test",
     size = "small",
@@ -37,6 +37,7 @@

 #include <algorithm>
 #include <array>
+#include "absl/base/internal/hide_ptr.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"

@@ -276,18 +277,6 @@ inline uint32_t NodeVersion(GraphId id) {
   return static_cast<uint32_t>(id.handle >> 32);
 }

-// We need to hide Mutexes (or other deadlock detection's pointers)
-// from the leak detector. Xor with an arbitrary number with high bits set.
-static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
-
-static inline uintptr_t MaskPtr(void *ptr) {
-  return reinterpret_cast<uintptr_t>(ptr) ^ kHideMask;
-}
-
-static inline void* UnmaskPtr(uintptr_t word) {
-  return reinterpret_cast<void*>(word ^ kHideMask);
-}
-
 struct Node {
   int32_t rank;      // rank number assigned by Pearce-Kelly algorithm
   uint32_t version;  // Current version number
@@ -309,7 +298,7 @@ class PointerMap {
   }

   int32_t Find(void* ptr) {
-    auto masked = MaskPtr(ptr);
+    auto masked = base_internal::HidePtr(ptr);
     for (int32_t i = table_[Hash(ptr)]; i != -1;) {
       Node* n = (*nodes_)[i];
       if (n->masked_ptr == masked) return i;
@@ -327,7 +316,7 @@ class PointerMap {
   int32_t Remove(void* ptr) {
     // Advance through linked list while keeping track of the
     // predecessor slot that points to the current entry.
-    auto masked = MaskPtr(ptr);
+    auto masked = base_internal::HidePtr(ptr);
     for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
       int32_t index = *slot;
       Node* n = (*nodes_)[index];
@@ -395,7 +384,7 @@ bool GraphCycles::CheckInvariants() const {
   NodeSet ranks;  // Set of ranks seen so far.
   for (uint32_t x = 0; x < r->nodes_.size(); x++) {
     Node* nx = r->nodes_[x];
-    void* ptr = UnmaskPtr(nx->masked_ptr);
+    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
     if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
       ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
     }
@@ -427,7 +416,7 @@ GraphId GraphCycles::GetId(void* ptr) {
     n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
     n->visited = false;
     n->rank = rep_->nodes_.size();
-    n->masked_ptr = MaskPtr(ptr);
+    n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
     rep_->nodes_.push_back(n);
@@ -439,7 +428,7 @@ GraphId GraphCycles::GetId(void* ptr) {
     int32_t r = rep_->free_nodes_.back();
     rep_->free_nodes_.pop_back();
     Node* n = rep_->nodes_[r];
-    n->masked_ptr = MaskPtr(ptr);
+    n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
     rep_->ptrmap_.Add(ptr, r);
@@ -461,7 +450,7 @@ void GraphCycles::RemoveNode(void* ptr) {
   }
   x->in.clear();
   x->out.clear();
-  x->masked_ptr = MaskPtr(nullptr);
+  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
   if (x->version == std::numeric_limits<uint32_t>::max()) {
     // Cannot use x any more
   } else {
@@ -472,7 +461,8 @@ void GraphCycles::RemoveNode(void* ptr) {

 void* GraphCycles::Ptr(GraphId id) {
   Node* n = FindNode(rep_, id);
-  return n == nullptr ? nullptr : UnmaskPtr(n->masked_ptr);
+  return n == nullptr ? nullptr
+                      : base_internal::UnhidePtr<void>(n->masked_ptr);
 }

 bool GraphCycles::HasNode(GraphId node) {
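As a sanity check on this refactor, an illustrative sketch (not part of the commit) showing that the constant removed above is the same value the new base_internal::HideMask() produces, so values masked with the old kHideMask unhide correctly through the new helper:

#include <cassert>
#include <cstdint>

#include "absl/base/internal/hide_ptr.h"

void CheckOldAndNewMasksAgree() {
  // The literal removed from graphcycles.cc. On 32-bit platforms it truncates
  // to 0xF03A5F7B, which is also what HideMask() returns there.
  const uintptr_t kOldHideMask =
      static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
  assert(kOldHideMask == absl::base_internal::HideMask());

  int object = 0;
  uintptr_t hidden = reinterpret_cast<uintptr_t>(&object) ^ kOldHideMask;
  assert(absl::base_internal::UnhidePtr<int>(hidden) == &object);
}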
absl/synchronization/internal/graphcycles_benchmark.cc (new file, 46 lines)
@@ -0,0 +1,46 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/synchronization/internal/graphcycles.h"

#include <algorithm>
#include <cstdint>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"

namespace {

void BM_StressTest(benchmark::State& state) {
  const int num_nodes = state.range(0);
  while (state.KeepRunningBatch(num_nodes)) {
    absl::synchronization_internal::GraphCycles g;
    std::vector<absl::synchronization_internal::GraphId> nodes(num_nodes);
    for (int i = 0; i < num_nodes; i++) {
      nodes[i] = g.GetId(reinterpret_cast<void*>(static_cast<uintptr_t>(i)));
    }
    for (int i = 0; i < num_nodes; i++) {
      int end = std::min(num_nodes, i + 5);
      for (int j = i + 1; j < end; j++) {
        ABSL_RAW_CHECK(g.InsertEdge(nodes[i], nodes[j]), "");
      }
    }
  }
}
BENCHMARK(BM_StressTest)->Range(2048, 1048576);

}  // namespace

BENCHMARK_MAIN();
@@ -43,6 +43,7 @@
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
 #include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/hide_ptr.h"
 #include "absl/base/internal/low_level_alloc.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
@@ -272,13 +273,6 @@ static absl::base_internal::SpinLock synch_event_mu(
 // Can't be too small, as it's used for deadlock detection information.
 static const uint32_t kNSynchEvent = 1031;

-// We need to hide Mutexes (or other deadlock detection's pointers)
-// from the leak detector.
-static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);
-static uintptr_t MaskMu(const void *mu) {
-  return reinterpret_cast<uintptr_t>(mu) ^ kHideMask;
-}
-
 static struct SynchEvent {  // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
   int refcount GUARDED_BY(synch_event_mu);
@@ -314,7 +308,8 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
   SynchEvent *e;
   // first look for existing SynchEvent struct..
   synch_event_mu.Lock();
-  for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+  for (e = synch_event[h];
+       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
        e = e->next) {
   }
   if (e == nullptr) {  // no SynchEvent struct found; make one.
@@ -325,7 +320,7 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
     e = reinterpret_cast<SynchEvent *>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
     e->refcount = 2;  // one for return value, one for linked list
-    e->masked_addr = MaskMu(addr);
+    e->masked_addr = base_internal::HidePtr(addr);
     e->invariant = nullptr;
     e->arg = nullptr;
     e->log = false;
@@ -367,7 +362,8 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
   SynchEvent *e;
   synch_event_mu.Lock();
   for (pe = &synch_event[h];
-       (e = *pe) != nullptr && e->masked_addr != MaskMu(addr); pe = &e->next) {
+       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+       pe = &e->next) {
   }
   bool del = false;
   if (e != nullptr) {
@@ -388,7 +384,8 @@ static SynchEvent *GetSynchEvent(const void *addr) {
   uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
   SynchEvent *e;
   synch_event_mu.Lock();
-  for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+  for (e = synch_event[h];
+       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
        e = e->next) {
   }
   if (e != nullptr) {
absl/synchronization/mutex_benchmark.cc (new file, 96 lines)
@@ -0,0 +1,96 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"

namespace {

// Measure the overhead of conditions on mutex release (when they must be
// evaluated).  Mutex has (some) support for equivalence classes allowing
// Conditions with the same function/argument to potentially not be multiply
// evaluated.
//
// num_classes==0 is used for the special case of every waiter being distinct.
void BM_ConditionWaiters(benchmark::State& state) {
  int num_classes = state.range(0);
  int num_waiters = state.range(1);

  struct Helper {
    static void Waiter(absl::BlockingCounter* init, absl::Mutex* m, int* p) {
      init->DecrementCount();
      m->LockWhen(absl::Condition(
          static_cast<bool (*)(int*)>([](int* v) { return *v == 0; }), p));
      m->Unlock();
    }
  };

  if (num_classes == 0) {
    // No equivalence classes.
    num_classes = num_waiters;
  }

  absl::BlockingCounter init(num_waiters);
  absl::Mutex mu;
  std::vector<int> equivalence_classes(num_classes, 1);

  // Must be declared last to be destroyed first.
  absl::synchronization_internal::ThreadPool pool(num_waiters);

  for (int i = 0; i < num_waiters; i++) {
    // Mutex considers Conditions with the same function and argument
    // to be equivalent.
    pool.Schedule([&, i] {
      Helper::Waiter(&init, &mu, &equivalence_classes[i % num_classes]);
    });
  }
  init.Wait();

  for (auto _ : state) {
    mu.Lock();
    mu.Unlock();  // Each unlock requires Condition evaluation for our waiters.
  }

  mu.Lock();
  for (int i = 0; i < num_classes; i++) {
    equivalence_classes[i] = 0;
  }
  mu.Unlock();
}

#ifdef THREAD_SANITIZER
// ThreadSanitizer can't handle 8192 threads.
constexpr int kMaxConditionWaiters = 2048;
#else
constexpr int kMaxConditionWaiters = 8192;
#endif
BENCHMARK(BM_ConditionWaiters)->RangePair(0, 2, 1, kMaxConditionWaiters);

void BM_ContendedMutex(benchmark::State& state) {
  static absl::Mutex* mu = new absl::Mutex;
  for (auto _ : state) {
    absl::MutexLock lock(mu);
  }
}
BENCHMARK(BM_ContendedMutex)->Threads(1);
BENCHMARK(BM_ContendedMutex)->ThreadPerCpu();

}  // namespace

BENCHMARK_MAIN();