Export of internal Abseil changes
-- 90ecacd2a3db96ee64ef23af37a80fad404e2b32 by Gennadiy Rozental <rogeeff@google.com>:
   Fixes MSVC regression by making the MSVC version of class Flag into an aggregate type.
   PiperOrigin-RevId: 277767054
-- 018f3b040df51d91a988fa146fee163721e605e9 by Abseil Team <absl-team@google.com>:
   Change the "libstdc++ lacking std::make_unique" check from a gcc version check to one based on feature macros.
   PiperOrigin-RevId: 277736042
-- 475844775ae343e2414318f08549ee3fa6676a8d by CJ Johnson <johnsoncj@google.com>:
   Pass allocator_type through allocator_traits before extracting the typedefs.
   PiperOrigin-RevId: 277730393
-- d843bc4bc30bf5b11af76db8beda8634b6111a62 by Abseil Team <absl-team@google.com>:
   Convert the Waiter::Init() method to the default constructor and define a destructor for the Waiter class. Use placement new and delete with Waiter objects.
   PiperOrigin-RevId: 277728823
-- 1ba6edf421dd2dfe13c55970a03c99592cb6677d by Derek Mauro <dmauro@google.com>:
   Use lowercase spelling for the include of dbghelp.h; this matters when cross-compiling under MinGW.
   PiperOrigin-RevId: 277629783
-- cfc662a6fa357a84ddda8037156c7f26cee40c36 by Abseil Team <absl-team@google.com>:
   Don't use atomic ops on waiter and wakeup counts in the WIN32 waiter mode. Ports the new CONDVAR waiter mode code in CL 277366017 to the WIN32 waiter mode.
   PiperOrigin-RevId: 277603611
-- 833106542e61fa0832900adf3c1b2afc6890b94b by Abseil Team <absl-team@google.com>:
   Add the PerThreadSem::Destroy() method. For ABSL_WAITER_MODE_CONDVAR or ABSL_WAITER_MODE_SEM, PerThreadSem::Destroy() is used to destroy the pthread mutex and condition variable or the POSIX semaphore.
   PiperOrigin-RevId: 277586675
-- 7814da4a59106cf1e0e4db1a31b9592ebbd2094b by Samuel Benzaquen <sbenza@google.com>:
   Enable the assertion in the iterator's operator* and operator->.
   PiperOrigin-RevId: 277563401
GitOrigin-RevId: 90ecacd2a3db96ee64ef23af37a80fad404e2b32
Change-Id: Ib19be3680da74f0b94055c9039115ec6bcaea7b0
parent 83880e3d8c
commit 846e5dbeda
13 changed files with 238 additions and 143 deletions
@@ -69,9 +69,10 @@ class InlinedVector {
static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity.");

using Storage = inlined_vector_internal::Storage<T, N, A>;
using rvalue_reference = typename Storage::rvalue_reference;
using MoveIterator = typename Storage::MoveIterator;

using AllocatorTraits = typename Storage::AllocatorTraits;
using RValueReference = typename Storage::RValueReference;
using MoveIterator = typename Storage::MoveIterator;
using IsMemcpyOk = typename Storage::IsMemcpyOk;

template <typename Iterator>

@@ -92,10 +93,10 @@ class InlinedVector {
using value_type = typename Storage::value_type;
using pointer = typename Storage::pointer;
using const_pointer = typename Storage::const_pointer;
using reference = typename Storage::reference;
using const_reference = typename Storage::const_reference;
using size_type = typename Storage::size_type;
using difference_type = typename Storage::difference_type;
using reference = typename Storage::reference;
using const_reference = typename Storage::const_reference;
using iterator = typename Storage::iterator;
using const_iterator = typename Storage::const_iterator;
using reverse_iterator = typename Storage::reverse_iterator;

@@ -563,7 +564,7 @@ class InlinedVector {
// Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
// move semantics, returning an `iterator` to the newly inserted element.
iterator insert(const_iterator pos, rvalue_reference v) {
iterator insert(const_iterator pos, RValueReference v) {
return emplace(pos, std::move(v));
}

@@ -660,7 +661,7 @@ class InlinedVector {
// Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
// using move semantics.
void push_back(rvalue_reference v) {
void push_back(RValueReference v) {
static_cast<void>(emplace_back(std::move(v)));
}

@@ -1754,6 +1754,30 @@ TEST(AllocatorSupportTest, SizeAllocConstructor) {
}
}

TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) {
using T = int;
using A = std::allocator<T>;
using ATraits = absl::allocator_traits<A>;

struct MinimumAllocator {
using value_type = T;

value_type* allocate(size_t n) {
A a;
return ATraits::allocate(a, n);
}

void deallocate(value_type* p, size_t n) {
A a;
ATraits::deallocate(a, p, n);
}
};

absl::InlinedVector<T, 1, MinimumAllocator> vec;
vec.emplace_back();
vec.resize(0);
}

TEST(InlinedVectorTest, AbslHashValueWorks) {
using V = absl::InlinedVector<int, 4>;
std::vector<V> cases;

@@ -37,16 +37,17 @@ using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>;

template <typename AllocatorType>
using IsMemcpyOk = absl::conjunction<
std::is_same<std::allocator<typename AllocatorType::value_type>,
AllocatorType>,
absl::is_trivially_copy_constructible<typename AllocatorType::value_type>,
absl::is_trivially_copy_assignable<typename AllocatorType::value_type>,
absl::is_trivially_destructible<typename AllocatorType::value_type>>;
template <typename AllocatorType,
typename ValueType =
typename absl::allocator_traits<AllocatorType>::value_type>
using IsMemcpyOk =
absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
absl::is_trivially_copy_constructible<ValueType>,
absl::is_trivially_copy_assignable<ValueType>,
absl::is_trivially_destructible<ValueType>>;

template <typename AllocatorType, typename ValueType, typename SizeType>
void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first,
template <typename AllocatorType, typename Pointer, typename SizeType>
void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
SizeType destroy_size) {
using AllocatorTraits = absl::allocator_traits<AllocatorType>;

@@ -57,20 +58,25 @@ void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first,
}

#if !defined(NDEBUG)
// Overwrite unused memory with `0xab` so we can catch uninitialized usage.
//
// Cast to `void*` to tell the compiler that we don't care that we might be
// scribbling on a vtable pointer.
auto* memory_ptr = static_cast<void*>(destroy_first);
auto memory_size = sizeof(ValueType) * destroy_size;
std::memset(memory_ptr, 0xab, memory_size);
{
using ValueType = typename AllocatorTraits::value_type;

// Overwrite unused memory with `0xab` so we can catch uninitialized
// usage.
//
// Cast to `void*` to tell the compiler that we don't care that we might
// be scribbling on a vtable pointer.
void* memory_ptr = destroy_first;
auto memory_size = destroy_size * sizeof(ValueType);
std::memset(memory_ptr, 0xab, memory_size);
}
#endif // !defined(NDEBUG)
}
}

template <typename AllocatorType, typename ValueType, typename ValueAdapter,
template <typename AllocatorType, typename Pointer, typename ValueAdapter,
typename SizeType>
void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
ValueAdapter* values_ptr, SizeType construct_size) {
for (SizeType i = 0; i < construct_size; ++i) {
ABSL_INTERNAL_TRY {

@@ -83,8 +89,8 @@ void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
}
}

template <typename ValueType, typename ValueAdapter, typename SizeType>
void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
template <typename Pointer, typename ValueAdapter, typename SizeType>
void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
SizeType assign_size) {
for (SizeType i = 0; i < assign_size; ++i) {
values_ptr->AssignNext(assign_first + i);

@@ -93,28 +99,29 @@ void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,

template <typename AllocatorType>
struct StorageView {
using pointer = typename AllocatorType::pointer;
using size_type = typename AllocatorType::size_type;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using Pointer = typename AllocatorTraits::pointer;
using SizeType = typename AllocatorTraits::size_type;

pointer data;
size_type size;
size_type capacity;
Pointer data;
SizeType size;
SizeType capacity;
};

template <typename AllocatorType, typename Iterator>
class IteratorValueAdapter {
using pointer = typename AllocatorType::pointer;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using Pointer = typename AllocatorTraits::pointer;

public:
explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}

void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
++it_;
}

void AssignNext(pointer assign_at) {
void AssignNext(Pointer assign_at) {
*assign_at = *it_;
++it_;
}

@@ -125,46 +132,45 @@ class IteratorValueAdapter {

template <typename AllocatorType>
class CopyValueAdapter {
using pointer = typename AllocatorType::pointer;
using const_pointer = typename AllocatorType::const_pointer;
using const_reference = typename AllocatorType::const_reference;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using ValueType = typename AllocatorTraits::value_type;
using Pointer = typename AllocatorTraits::pointer;
using ConstPointer = typename AllocatorTraits::const_pointer;

public:
explicit CopyValueAdapter(const_reference v) : ptr_(std::addressof(v)) {}
explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}

void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
}

void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }

private:
const_pointer ptr_;
ConstPointer ptr_;
};

template <typename AllocatorType>
class DefaultValueAdapter {
using pointer = typename AllocatorType::pointer;
using value_type = typename AllocatorType::value_type;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using ValueType = typename AllocatorTraits::value_type;
using Pointer = typename AllocatorTraits::pointer;

public:
explicit DefaultValueAdapter() {}

void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
AllocatorTraits::construct(*alloc_ptr, construct_at);
}

void AssignNext(pointer assign_at) { *assign_at = value_type(); }
void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
};

template <typename AllocatorType>
class AllocationTransaction {
using value_type = typename AllocatorType::value_type;
using pointer = typename AllocatorType::pointer;
using size_type = typename AllocatorType::size_type;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using Pointer = typename AllocatorTraits::pointer;
using SizeType = typename AllocatorTraits::size_type;

public:
explicit AllocationTransaction(AllocatorType* alloc_ptr)

@@ -180,11 +186,11 @@ class AllocationTransaction {
void operator=(const AllocationTransaction&) = delete;

AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
pointer& GetData() { return alloc_data_.template get<1>(); }
size_type& GetCapacity() { return capacity_; }
Pointer& GetData() { return alloc_data_.template get<1>(); }
SizeType& GetCapacity() { return capacity_; }

bool DidAllocate() { return GetData() != nullptr; }
pointer Allocate(size_type capacity) {
Pointer Allocate(SizeType capacity) {
GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
GetCapacity() = capacity;
return GetData();

@@ -196,14 +202,15 @@ class AllocationTransaction {
}

private:
container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
size_type capacity_ = 0;
container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
SizeType capacity_ = 0;
};

template <typename AllocatorType>
class ConstructionTransaction {
using pointer = typename AllocatorType::pointer;
using size_type = typename AllocatorType::size_type;
using AllocatorTraits = absl::allocator_traits<AllocatorType>;
using Pointer = typename AllocatorTraits::pointer;
using SizeType = typename AllocatorTraits::size_type;

public:
explicit ConstructionTransaction(AllocatorType* alloc_ptr)

@@ -220,12 +227,12 @@ class ConstructionTransaction {
void operator=(const ConstructionTransaction&) = delete;

AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
pointer& GetData() { return alloc_data_.template get<1>(); }
size_type& GetSize() { return size_; }
Pointer& GetData() { return alloc_data_.template get<1>(); }
SizeType& GetSize() { return size_; }

bool DidConstruct() { return GetData() != nullptr; }
template <typename ValueAdapter>
void Construct(pointer data, ValueAdapter* values_ptr, size_type size) {
void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
data, values_ptr, size);
GetData() = data;

@@ -237,28 +244,29 @@ class ConstructionTransaction {
}

private:
container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
size_type size_ = 0;
container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
SizeType size_ = 0;
};

template <typename T, size_t N, typename A>
class Storage {
public:
using allocator_type = A;
using value_type = typename allocator_type::value_type;
using pointer = typename allocator_type::pointer;
using const_pointer = typename allocator_type::const_pointer;
using reference = typename allocator_type::reference;
using const_reference = typename allocator_type::const_reference;
using rvalue_reference = typename allocator_type::value_type&&;
using size_type = typename allocator_type::size_type;
using difference_type = typename allocator_type::difference_type;
using AllocatorTraits = absl::allocator_traits<A>;
using allocator_type = typename AllocatorTraits::allocator_type;
using value_type = typename AllocatorTraits::value_type;
using pointer = typename AllocatorTraits::pointer;
using const_pointer = typename AllocatorTraits::const_pointer;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;

using reference = value_type&;
using const_reference = const value_type&;
using RValueReference = value_type&&;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
using MoveIterator = std::move_iterator<iterator>;
using AllocatorTraits = absl::allocator_traits<allocator_type>;
using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;

using StorageView = inlined_vector_internal::StorageView<allocator_type>;

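A hedged aside on the pattern running through the hunks above: nested typedefs are now read off absl::allocator_traits rather than the allocator itself, because a minimal allocator only has to provide value_type and the traits class synthesizes pointer, size_type, and friends on its behalf. The small illustration below uses the standard traits; MinimalAlloc is a hypothetical allocator, not from the Abseil sources.

#include <cstddef>
#include <memory>
#include <type_traits>

// Hypothetical minimal allocator: value_type is the only typedef it declares.
template <typename T>
struct MinimalAlloc {
  using value_type = T;
  T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
  void deallocate(T* p, std::size_t n) { std::allocator<T>().deallocate(p, n); }
};

using Traits = std::allocator_traits<MinimalAlloc<int>>;
// allocator_traits fills in pointer (and size_type, etc.) even though
// MinimalAlloc never declares them.
static_assert(std::is_same<Traits::pointer, int*>::value,
              "pointer defaults to value_type*");
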
@@ -615,7 +615,7 @@ class raw_hash_set {

// PRECONDITION: not an end() iterator.
reference operator*() const {
/* To be enabled: assert_is_full(); */
assert_is_full();
return PolicyTraits::element(slot_);
}

@@ -17,10 +17,10 @@

#include <windows.h>

// MSVC header DbgHelp.h has a warning for an ignored typedef.
// MSVC header dbghelp.h has a warning for an ignored typedef.
#pragma warning(push)
#pragma warning(disable:4091)
#include <DbgHelp.h>
#include <dbghelp.h>
#pragma warning(pop)

#pragma comment(lib, "dbghelp.lib")

@@ -67,17 +67,20 @@ namespace absl {
template <typename T>
using Flag = flags_internal::Flag<T>;
#else
// MSVC debug builds do not implement constexpr correctly for classes with
// virtual methods. To work around this we adding level of indirection, so that
// the class `absl::Flag` contains an `internal::Flag*` (instead of being an
// alias to that class) and dynamically allocates an instance when necessary.
// We also forward all calls to internal::Flag methods via trampoline methods.
// In this setup the `absl::Flag` class does not have virtual methods and thus
// MSVC is able to initialize it at link time. To deal with multiple threads
// accessing the flag for the first time concurrently we use an atomic boolean
// indicating if flag object is constructed. We also employ the double-checked
// locking pattern where the second level of protection is a global Mutex, so
// if two threads attempt to construct the flag concurrently only one wins.
// MSVC debug builds do not implement initialization with constexpr constructors
// correctly. To work around this we add a level of indirection, so that the
// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias
// to that class) and dynamically allocates an instance when necessary. We also
// forward all calls to internal::Flag methods via trampoline methods. In this
// setup the `absl::Flag` class does not have constructor and virtual methods,
// all the data members are public and thus MSVC is able to initialize it at
// link time. To deal with multiple threads accessing the flag for the first
// time concurrently we use an atomic boolean indicating if flag object is
// initialized. We also employ the double-checked locking pattern where the
// second level of protection is a global Mutex, so if two threads attempt to
// construct the flag concurrently only one wins.
// This solution is based on a recomendation here:
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454

namespace flags_internal {
absl::Mutex* GetGlobalConstructionGuard();

@@ -86,16 +89,23 @@ absl::Mutex* GetGlobalConstructionGuard();
template <typename T>
class Flag {
public:
// No constructor and destructor to ensure this is an aggregate type.
// Visual Studio 2015 still requires the constructor for class to be
// constexpr initializable.
#if _MSC_VER <= 1900
constexpr Flag(const char* name, const flags_internal::HelpGenFunc help_gen,
const char* filename,
const flags_internal::FlagMarshallingOpFn marshalling_op,
const flags_internal::InitialValGenFunc initial_value_gen)
const flags_internal::InitialValGenFunc initial_value_gen,
bool, void*)
: name_(name),
help_gen_(help_gen),
filename_(filename),
marshalling_op_(marshalling_op),
initial_value_gen_(initial_value_gen),
inited_(false) {}
inited_(false),
impl_(nullptr) {}
#endif

flags_internal::Flag<T>* GetImpl() const {
if (!inited_.load(std::memory_order_acquire)) {

@@ -113,7 +123,8 @@ class Flag {
return impl_;
}

// absl::Flag API
// Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
// See https://abseil.io/docs/cpp/guides/flags
bool IsRetired() const { return GetImpl()->IsRetired(); }
bool IsAbseilFlag() const { return GetImpl()->IsAbseilFlag(); }
absl::string_view Name() const { return GetImpl()->Name(); }

@@ -126,9 +137,9 @@ class Flag {
std::string Filename() const { return GetImpl()->Filename(); }
std::string DefaultValue() const { return GetImpl()->DefaultValue(); }
std::string CurrentValue() const { return GetImpl()->CurrentValue(); }
template <typename T1>
template <typename U>
inline bool IsOfType() const {
return GetImpl()->template IsOfType<T1>();
return GetImpl()->template IsOfType<U>();
}
T Get() const { return GetImpl()->Get(); }
bool AtomicGet(T* v) const { return GetImpl()->AtomicGet(v); }

@@ -138,7 +149,8 @@ class Flag {
}
void InvokeCallback() { GetImpl()->InvokeCallback(); }

private:
// The data members are logically private, but they need to be public for
// this to be an aggregate type.
const char* name_;
const flags_internal::HelpGenFunc help_gen_;
const char* filename_;

@@ -146,7 +158,7 @@ class Flag {
const flags_internal::InitialValGenFunc initial_value_gen_;

mutable std::atomic<bool> inited_;
mutable flags_internal::Flag<T>* impl_ = nullptr;
mutable flags_internal::Flag<T>* impl_;
};
#endif

@@ -310,17 +322,35 @@ void SetFlag(absl::Flag<T>* flag, const V& v) {
// Note: Name of registrar object is not arbitrary. It is used to "grab"
// global name for FLAGS_no<flag_name> symbol, thus preventing the possibility
// of defining two flags with names foo and nofoo.
#if !defined(_MSC_VER) || defined(__clang__)
#define ABSL_FLAG_IMPL(Type, name, default_value, help) \
namespace absl /* block flags in namespaces */ {} \
ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \
ABSL_CONST_INIT absl::Flag<Type> FLAGS_##name( \
ABSL_CONST_INIT absl::Flag<Type> FLAGS_##name{ \
ABSL_FLAG_IMPL_FLAGNAME(#name), &AbslFlagsWrapHelp##name, \
ABSL_FLAG_IMPL_FILENAME(), \
&absl::flags_internal::FlagMarshallingOps<Type>, \
&AbslFlagsInitFlag##name); \
&AbslFlagsInitFlag##name}; \
extern bool FLAGS_no##name; \
bool FLAGS_no##name = ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name)
#else
// MSVC version uses aggregate initialization.
#define ABSL_FLAG_IMPL(Type, name, default_value, help) \
namespace absl /* block flags in namespaces */ {} \
ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \
ABSL_CONST_INIT absl::Flag<Type> FLAGS_##name{ \
ABSL_FLAG_IMPL_FLAGNAME(#name), \
&AbslFlagsWrapHelp##name, \
ABSL_FLAG_IMPL_FILENAME(), \
&absl::flags_internal::FlagMarshallingOps<Type>, \
&AbslFlagsInitFlag##name, \
false, \
nullptr}; \
extern bool FLAGS_no##name; \
bool FLAGS_no##name = ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name)
#endif

// ABSL_RETIRED_FLAG
//

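As context for the absl::Flag comment block and the truncated GetImpl() body earlier in this section, here is a minimal, hypothetical sketch of the double-checked initialization pattern it describes. This is not Abseil's implementation: GlobalGuard and LazyHolder are made-up names, and a plain int stands in for flags_internal::Flag<T>.

#include <atomic>
#include <mutex>

std::mutex& GlobalGuard() {  // second level of protection, shared by all holders
  static std::mutex guard;
  return guard;
}

struct LazyHolder {  // aggregate: no constructors, public members
  mutable std::atomic<bool> inited_;
  mutable int* impl_;  // stands in for the dynamically allocated implementation

  int* GetImpl() const {
    if (!inited_.load(std::memory_order_acquire)) {    // fast path: already built
      std::lock_guard<std::mutex> lock(GlobalGuard());
      if (!inited_.load(std::memory_order_acquire)) {  // re-check under the lock
        impl_ = new int(0);                            // only one thread constructs
        inited_.store(true, std::memory_order_release);
      }
    }
    return impl_;
  }
};
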
@@ -92,11 +92,12 @@ struct MakeUniqueResult<T[N]> {

} // namespace memory_internal

// gcc 4.8 has __cplusplus at 201301 but doesn't define make_unique. Other
// supported compilers either just define __cplusplus as 201103 but have
// make_unique (msvc), or have make_unique whenever __cplusplus > 201103 (clang)
// gcc 4.8 has __cplusplus at 201301 but the libstdc++ shipped with it doesn't
// define make_unique. Other supported compilers either just define __cplusplus
// as 201103 but have make_unique (msvc), or have make_unique whenever
// __cplusplus > 201103 (clang).
#if (__cplusplus > 201103L || defined(_MSC_VER)) && \
!(defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8)
!(defined(__GLIBCXX__) && !defined(__cpp_lib_make_unique))
using std::make_unique;
#else
// -----------------------------------------------------------------------------

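The hunk above replaces a compiler-version test with a library feature-test macro. A rough illustration of that general approach, kept separate from the Abseil header itself:

#include <memory>  // most C++14-and-later standard libraries define __cpp_lib_make_unique here

#if defined(__cpp_lib_make_unique)
using std::make_unique;  // the standard library advertises the facility
#else
// otherwise fall back to a hand-rolled implementation
#endif
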
@@ -37,7 +37,7 @@ static base_internal::ThreadIdentity* thread_identity_freelist;

// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage we cache them for re-use.
static void ReclaimThreadIdentity(void* v) {
void ReclaimThreadIdentity(void* v) {
base_internal::ThreadIdentity* identity =
static_cast<base_internal::ThreadIdentity*>(v);

@@ -47,6 +47,8 @@ static void ReclaimThreadIdentity(void* v) {
base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
}

PerThreadSem::Destroy(identity);

// We must explicitly clear the current thread's identity:
// (a) Subsequent (unrelated) per-thread destructors may require an identity.
// We must guarantee a new identity is used in this case (this instructor

@@ -35,6 +35,10 @@ namespace synchronization_internal {
// For private use only.
base_internal::ThreadIdentity* CreateThreadIdentity();

// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// For private use only.
void ReclaimThreadIdentity(void* v);

// Returns the ThreadIdentity object representing the calling thread; guaranteed
// to be unique for its lifetime. The returned object will remain valid for the
// program's lifetime; although it may be re-assigned to a subsequent thread.

@@ -40,12 +40,16 @@ std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
}

void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
Waiter::GetWaiter(identity)->Init();
new (Waiter::GetWaiter(identity)) Waiter();
identity->ticker.store(0, std::memory_order_relaxed);
identity->wait_start.store(0, std::memory_order_relaxed);
identity->is_idle.store(false, std::memory_order_relaxed);
}

void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
Waiter::GetWaiter(identity)->~Waiter();
}

void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;

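The Init()/Destroy() pair above relies on placement new and an explicit destructor call because the Waiter's storage is reserved inside ThreadIdentity rather than heap-allocated. A self-contained sketch of that lifetime pattern follows; Widget is illustrative, not Abseil code.

#include <new>
#include <type_traits>

struct Widget {
  Widget() {}   // acquire OS resources here
  ~Widget() {}  // release them here
};

int main() {
  // Storage is owned elsewhere (here, the stack); the object is created and
  // destroyed inside it without ever calling operator new/delete on the heap.
  std::aligned_storage<sizeof(Widget), alignof(Widget)>::type storage;
  Widget* w = ::new (static_cast<void*>(&storage)) Widget();
  w->~Widget();  // explicit destructor call; the storage itself is not freed
}
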
@@ -65,6 +65,10 @@ class PerThreadSem {
// REQUIRES: May only be called by ThreadIdentity.
static void Init(base_internal::ThreadIdentity* identity);

// Destroy the PerThreadSem associated with "identity".
// REQUIRES: May only be called by ThreadIdentity.
static void Destroy(base_internal::ThreadIdentity* identity);

// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);

@@ -77,6 +81,7 @@ class PerThreadSem {
friend class PerThreadSemTest;
friend class absl::Mutex;
friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
friend void ReclaimThreadIdentity(void* v);
};

} // namespace synchronization_internal

@@ -122,10 +122,12 @@ class Futex {
}
};

void Waiter::Init() {
Waiter::Waiter() {
futex_.store(0, std::memory_order_relaxed);
}

Waiter::~Waiter() = default;

bool Waiter::Wait(KernelTimeout t) {
// Loop until we can atomically decrement futex from a positive
// value, waiting on a futex while we believe it is zero.

@@ -199,7 +201,7 @@ class PthreadMutexHolder {
pthread_mutex_t *mu_;
};

void Waiter::Init() {
Waiter::Waiter() {
const int err = pthread_mutex_init(&mu_, 0);
if (err != 0) {
ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);

@@ -214,6 +216,18 @@ void Waiter::Init() {
wakeup_count_ = 0;
}

Waiter::~Waiter() {
const int err = pthread_mutex_destroy(&mu_);
if (err != 0) {
ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
}

const int err2 = pthread_cond_destroy(&cv_);
if (err2 != 0) {
ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
}
}

bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {

@@ -274,13 +288,19 @@ void Waiter::InternalCondVarPoke() {

#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM

void Waiter::Init() {
Waiter::Waiter() {
if (sem_init(&sem_, 0, 0) != 0) {
ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
}
wakeups_.store(0, std::memory_order_relaxed);
}

Waiter::~Waiter() {
if (sem_destroy(&sem_) != 0) {
ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
}
}

bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {

@@ -388,39 +408,32 @@ class LockHolder {
SRWLOCK* mu_;
};

void Waiter::Init() {
Waiter::Waiter() {
auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
InitializeSRWLock(mu);
InitializeConditionVariable(cv);
waiter_count_.store(0, std::memory_order_relaxed);
wakeup_count_.store(0, std::memory_order_relaxed);
waiter_count_ = 0;
wakeup_count_ = 0;
}

// SRW locks and condition variables do not need to be explicitly destroyed.
// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
Waiter::~Waiter() = default;

bool Waiter::Wait(KernelTimeout t) {
SRWLOCK *mu = WinHelper::GetLock(this);
CONDITION_VARIABLE *cv = WinHelper::GetCond(this);

LockHolder h(mu);
waiter_count_.fetch_add(1, std::memory_order_relaxed);
++waiter_count_;

// Loop until we find a wakeup to consume or timeout.
// Note that, since the thread ticker is just reset, we don't need to check
// whether the thread is idle on the very first pass of the loop.
bool first_pass = true;
while (true) {
int x = wakeup_count_.load(std::memory_order_relaxed);
if (x != 0) {
if (!wakeup_count_.compare_exchange_weak(x, x - 1,
std::memory_order_acquire,
std::memory_order_relaxed)) {
continue; // Raced with someone, retry.
}
// Successfully consumed a wakeup, we're done.
waiter_count_.fetch_sub(1, std::memory_order_relaxed);
return true;
}

while (wakeup_count_ == 0) {
if (!first_pass) MaybeBecomeIdle();
// No wakeups available, time to wait.
if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {

@@ -429,7 +442,7 @@ bool Waiter::Wait(KernelTimeout t) {
// initialization guarantees this is not a narrowing conversion.
const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
if (err == ERROR_TIMEOUT) {
waiter_count_.fetch_sub(1, std::memory_order_relaxed);
--waiter_count_;
return false;
} else {
ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);

@@ -437,23 +450,27 @@ bool Waiter::Wait(KernelTimeout t) {
}
first_pass = false;
}
// Consume a wakeup and we're done.
--wakeup_count_;
--waiter_count_;
return true;
}

void Waiter::Post() {
wakeup_count_.fetch_add(1, std::memory_order_release);
Poke();
LockHolder h(WinHelper::GetLock(this));
++wakeup_count_;
InternalCondVarPoke();
}

void Waiter::Poke() {
if (waiter_count_.load(std::memory_order_relaxed) == 0) {
return;
}
// Potentially a waiter. Take the lock and check again.
LockHolder h(WinHelper::GetLock(this));
if (waiter_count_.load(std::memory_order_relaxed) == 0) {
return;
InternalCondVarPoke();
}

void Waiter::InternalCondVarPoke() {
if (waiter_count_ != 0) {
WakeConditionVariable(WinHelper::GetCond(this));
}
WakeConditionVariable(WinHelper::GetCond(this));
}

#else

@@ -58,14 +58,15 @@ namespace synchronization_internal {
// Waiter is an OS-specific semaphore.
class Waiter {
public:
// No constructor, instances use the reserved space in ThreadIdentity.
// All initialization logic belongs in `Init()`.
Waiter() = delete;
// Prepare any data to track waits.
Waiter();

// Not copyable or movable
Waiter(const Waiter&) = delete;
Waiter& operator=(const Waiter&) = delete;

// Prepare any data to track waits.
void Init();
// Destroy any data to track waits.
~Waiter();

// Blocks the calling thread until a matching call to `Post()` or
// `t` has passed. Returns `true` if woken (`Post()` called),

@@ -122,13 +123,8 @@ class Waiter {
std::atomic<int> wakeups_;

#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
// The Windows API has lots of choices for synchronization
// primivitives. We are using SRWLOCK and CONDITION_VARIABLE
// because they don't require a destructor to release system
// resources.
//
// However, we can't include Windows.h in our headers, so we use aligned
// storage buffers to define the storage.
// We can't include Windows.h in our headers, so we use aligned storage
// buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
using SRWLockStorage =
typename std::aligned_storage<sizeof(void*), alignof(void*)>::type;
using ConditionVariableStorage =

@@ -138,10 +134,13 @@ class Waiter {
// condition variable storage once the types are complete.
class WinHelper;

// REQUIRES: WinHelper::GetLock(this) must be held.
void InternalCondVarPoke();

SRWLockStorage mu_storage_;
ConditionVariableStorage cv_storage_;
std::atomic<int> waiter_count_;
std::atomic<int> wakeup_count_;
int waiter_count_;
int wakeup_count_;

#else
#error Unknown ABSL_WAITER_MODE
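The WIN32 hunks above drop std::atomic for waiter_count_ and wakeup_count_ because every access now happens with the lock held. A rough, non-Abseil sketch of the same bookkeeping using standard C++ primitives:

#include <condition_variable>
#include <mutex>

class TinySemaphore {
 public:
  void Post() {
    std::lock_guard<std::mutex> lock(mu_);
    ++wakeup_count_;  // plain int is fine: only ever touched under mu_
    cv_.notify_one();
  }

  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    ++waiter_count_;
    while (wakeup_count_ == 0) {
      cv_.wait(lock);  // re-checks the predicate under the lock on wakeup
    }
    --wakeup_count_;  // consume one wakeup
    --waiter_count_;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int waiter_count_ = 0;  // no atomics needed; mu_ serializes all access
  int wakeup_count_ = 0;
};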