Merge branch 'master' into master

Gennadiy Civil 2017-10-30 10:56:35 -04:00 committed by GitHub
commit 200b5a7cb0
38 changed files with 461 additions and 191 deletions


@ -1,8 +0,0 @@
// This is a relaxed JSON format; comments are allowed.
// This is the list of build configurations for jobs that do not specify one.
[
{"node": "linux-x86_64"},
{"node": "ubuntu_16.04-x86_64"},
{"node": "darwin-x86_64"},
{"node": "windows-x86_64"}
]

.clang-format (new file, 4 lines added)

@ -0,0 +1,4 @@
---
Language: Cpp
BasedOnStyle: Google
...


@ -47,7 +47,8 @@ will be expected to conform to the style outlined
made and **why** it was made. Link to a GitHub issue if it exists.
* Don't fix code style and formatting unless you are already changing that
line to address an issue. PRs with irrelevant changes won't be merged. If
line to address an issue. Formatting of modified lines may be done using
`git clang-format`. PRs with irrelevant changes won't be merged. If
you do want to fix formatting or style, do that in a separate PR.
* Unless your PR is trivial, you should expect there will be reviewer comments


@ -90,7 +90,3 @@ For more information about Abseil:
## Build with CMake
Please check the [CMake build instructions](CMake/README.md)
## Disclaimer
* This is not an official Google product.


@ -23,6 +23,7 @@ config_setting(
values = {
"compiler": "llvm",
},
visibility = [":__subpackages__"],
)
# following configs are based on mapping defined in: https://git.io/v5Ijz
@ -31,6 +32,7 @@ config_setting(
values = {
"cpu": "darwin",
},
visibility = [":__subpackages__"],
)
config_setting(
@ -38,6 +40,7 @@ config_setting(
values = {
"cpu": "x64_windows",
},
visibility = [":__subpackages__"],
)
config_setting(
@ -45,4 +48,5 @@ config_setting(
values = {
"cpu": "ppc",
},
visibility = [":__subpackages__"],
)


@ -25,11 +25,10 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
exports_files(["thread_annotations.h"])
cc_library(
name = "spinlock_wait",
srcs = [
"internal/spinlock_akaros.inc",
"internal/spinlock_posix.inc",
"internal/spinlock_wait.cc",
"internal/spinlock_win32.inc",
@ -39,6 +38,9 @@ cc_library(
"internal/spinlock_wait.h",
],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl/base:__pkg__",
],
deps = [":core_headers"],
)
@ -83,6 +85,9 @@ cc_library(
"internal/malloc_extension_c.h",
],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
":core_headers",
":dynamic_annotations",
@ -108,6 +113,9 @@ cc_library(
textual_hdrs = [
"internal/malloc_hook_invoke.h",
],
visibility = [
"//absl:__subpackages__",
],
deps = [
":base",
":config",
@ -124,6 +132,9 @@ cc_library(
"internal/invoke.h",
],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl:__subpackages__",
],
)
cc_library(
@ -183,6 +194,9 @@ cc_library(
features = [
"-use_header_modules",
],
visibility = [
"//absl:__subpackages__",
],
deps = [
":base",
":config",
@ -205,6 +219,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/exception_testing.h"],
copts = ABSL_TEST_COPTS,
visibility = ["//absl:__subpackages__"],
deps = [
":config",
"@com_google_googletest//:gtest",


@ -372,4 +372,19 @@
#endif
#endif
// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
// the support for <optional>, <any>, <string_view>. So we use _MSC_VER to check
// whether we have VS 2017 RTM (when <optional>, <any>, <string_view> are
// implemented) or higher.
// Also, `__cplusplus` is not correctly set by MSVC, so we use `_MSVC_LANG` to
// check the language version.
// TODO(zhangxy): fix tests before enabling aliasing for `std::any`,
// `std::string_view`.
#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
// #define ABSL_HAVE_STD_ANY 1
#define ABSL_HAVE_STD_OPTIONAL 1
// #define ABSL_HAVE_STD_STRING_VIEW 1
#endif
#endif // ABSL_BASE_CONFIG_H_
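
As a quick illustration (a sketch, not part of this commit), a consumer could dispatch on the feature macro defined above; the alias name `maybe` is made up:

#include "absl/base/config.h"

#ifdef ABSL_HAVE_STD_OPTIONAL
#include <optional>
template <typename T>
using maybe = std::optional<T>;   // standard library implementation
#else
#include "absl/types/optional.h"
template <typename T>
using maybe = absl::optional<T>;  // Abseil fallback
#endif

#include <cassert>

int main() {
  maybe<int> m;                    // empty
  assert(!m.has_value());
  m = 42;
  assert(m.has_value() && *m == 42);
  return 0;
}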


@ -30,6 +30,7 @@
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>


@ -29,6 +29,13 @@ namespace base_internal {
SysAllocator::~SysAllocator() {}
void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
// Dummy key method to avoid weak vtable.
void MallocExtensionWriter::UnusedKeyMethod() {}
void StringMallocExtensionWriter::Write(const char* buf, int len) {
out_->append(buf, len);
}
// Default implementation -- does nothing
MallocExtension::~MallocExtension() { }
bool MallocExtension::VerifyAllMemory() { return true; }


@ -388,6 +388,9 @@ class MallocExtensionWriter {
MallocExtensionWriter() {}
MallocExtensionWriter(const MallocExtensionWriter&) = delete;
MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
private:
virtual void UnusedKeyMethod(); // Dummy key method to avoid weak vtable.
};
// A subclass that writes to the std::string "out". NOTE: The generated
@ -396,9 +399,7 @@ class MallocExtensionWriter {
class StringMallocExtensionWriter : public MallocExtensionWriter {
public:
explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
virtual void Write(const char* buf, int len) {
out_->append(buf, len);
}
void Write(const char* buf, int len) override;
private:
std::string* const out_;


@ -0,0 +1,35 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is an Akaros-specific part of spinlock_wait.cc
#include <atomic>
#include "absl/base/internal/scheduling_mode.h"
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
// In Akaros, one must take care not to call anything that could cause a
// malloc(), a blocking system call, or a uthread_yield() while holding a
// spinlock. Our callers assume we will not call into libraries or other
// arbitrary code.
}
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"


@ -23,6 +23,8 @@
#if defined(_WIN32)
#include "absl/base/internal/spinlock_win32.inc"
#elif defined(__akaros__)
#include "absl/base/internal/spinlock_akaros.inc"
#else
#include "absl/base/internal/spinlock_posix.inc"
#endif


@ -284,6 +284,30 @@ pid_t GetTID() {
return syscall(SYS_gettid);
}
#elif defined(__akaros__)
pid_t GetTID() {
// Akaros has a concept of "vcore context", which is the state the program
// is forced into when we need to make a user-level scheduling decision, or
// run a signal handler. This is analogous to the interrupt context that a
// CPU might enter if it encounters some kind of exception.
//
// There is no current thread context in vcore context, but we need to give
// a reasonable answer if asked for a thread ID (e.g., in a signal handler).
// Thread 0 always exists, so if we are in vcore context, we return that.
//
// Otherwise, we know (since we are using pthreads) that the uthread struct
// current_uthread is pointing to is the first element of a
// struct pthread_tcb, so we extract and return the thread ID from that.
//
// TODO(dcross): Akaros anticipates moving the thread ID to the uthread
// structure at some point. We should modify this code to remove the cast
// when that happens.
if (in_vcore_context())
return 0;
return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}
#else
// Fallback implementation of GetTID using pthread_getspecific.


@ -19,7 +19,22 @@
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#ifdef THREAD_SANITIZER
// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Macro intended only for internal use.
//
// Checks whether LLVM Thread Sanitizer interfaces are available.
// First made available in LLVM 5.0 (Sep 2017).
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
#endif
#if defined(THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>)
#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
#endif
#endif
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#include <sanitizer/tsan_interface.h>
#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
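
A rough usage sketch of the detection macro above (not from this commit; ABSL_TSAN_MUTEX_DESTROY is assumed to be defined alongside ABSL_TSAN_MUTEX_CREATE in the same header, and both are assumed to expand to nothing when the TSan interface is absent):

#include "absl/base/internal/tsan_mutex_interface.h"

// A lock-like object that registers itself with TSan when the interface is
// available; otherwise the annotation macros compile away.
class AnnotatedLock {
 public:
  AnnotatedLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
  ~AnnotatedLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }
};

int main() {
  AnnotatedLock lock;  // create/destroy annotations fire under TSan
  return 0;
}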


@ -29,6 +29,7 @@
#ifndef ABSL_BASE_MACROS_H_
#define ABSL_BASE_MACROS_H_
#include <cassert>
#include <cstddef>
#include "absl/base/port.h"


@ -112,6 +112,9 @@ cc_library(
srcs = ["internal/test_instance_tracker.cc"],
hdrs = ["internal/test_instance_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl:__subpackages__",
],
)
cc_test(


@ -82,7 +82,8 @@ class InlinedVector {
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
InlinedVector() noexcept(noexcept(allocator_type()))
InlinedVector() noexcept(
std::is_nothrow_default_constructible<allocator_type>::value)
: allocator_and_tag_(allocator_type()) {}
explicit InlinedVector(const allocator_type& alloc) noexcept
@ -148,6 +149,9 @@ class InlinedVector {
~InlinedVector() { clear(); }
InlinedVector& operator=(const InlinedVector& v) {
if (this == &v) {
return *this;
}
// Optimized to avoid reallocation.
// Prefer reassignment to copy construction for elements.
if (size() < v.size()) { // grow
@ -680,6 +684,8 @@ class InlinedVector {
// portion and the start of the uninitialized portion of the created gap.
// The number of initialized spots is pair.second - pair.first;
// the number of raw spots is n - (pair.second - pair.first).
//
// Updates the size of the InlinedVector internally.
std::pair<iterator, iterator> ShiftRight(const_iterator position,
size_type n);
@ -1013,28 +1019,19 @@ typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::emplace(
emplace_back(std::forward<Args>(args)...);
return end() - 1;
}
size_type s = size();
size_type idx = std::distance(cbegin(), position);
if (s == capacity()) {
EnlargeBy(1);
}
assert(s < capacity());
iterator pos = begin() + idx; // Set 'pos' to a post-enlarge iterator.
pointer space;
if (allocated()) {
tag().set_allocated_size(s + 1);
space = allocated_space();
T new_t = T(std::forward<Args>(args)...);
auto range = ShiftRight(position, 1);
if (range.first == range.second) {
// constructing into uninitialized memory
Construct(range.first, std::move(new_t));
} else {
tag().set_inline_size(s + 1);
space = inlined_space();
// assigning into moved-from object
*range.first = T(std::move(new_t));
}
Construct(space + s, std::move(space[s - 1]));
std::move_backward(pos, space + s - 1, space + s);
Destroy(pos, pos + 1);
Construct(pos, std::forward<Args>(args)...);
return pos;
return range.first;
}
template <typename T, size_t N, typename A>
@ -1219,6 +1216,7 @@ auto InlinedVector<T, N, A>::ShiftRight(const_iterator position, size_type n)
start_used = pos;
start_raw = pos + new_elements_in_used_space;
}
tag().add_size(n);
return std::make_pair(start_used, start_raw);
}
@ -1297,10 +1295,12 @@ auto InlinedVector<T, N, A>::InsertWithCount(const_iterator position,
-> iterator {
assert(position >= begin() && position <= end());
if (n == 0) return const_cast<iterator>(position);
value_type copy = v;
std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
std::fill(it_pair.first, it_pair.second, v);
UninitializedFill(it_pair.second, it_pair.first + n, v);
tag().add_size(n);
std::fill(it_pair.first, it_pair.second, copy);
UninitializedFill(it_pair.second, it_pair.first + n, copy);
return it_pair.first;
}
@ -1336,7 +1336,6 @@ auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
ForwardIter open_spot = std::next(first, used_spots);
std::copy(first, open_spot, it_pair.first);
UninitializedCopy(open_spot, last, it_pair.second);
tag().add_size(n);
return it_pair.first;
}
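
A small usage sketch (not part of the diff) of the aliasing cases the emplace()/InsertWithCount() changes above guard against, where the inserted value refers into the same vector:

#include <cassert>
#include <string>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<std::string, 4> v = {"a", "b", "c"};
  // The argument aliases v[0]; the new code copies it before ShiftRight()
  // moves elements around, so "a" is still the value that gets inserted.
  v.emplace(v.begin() + 1, v[0]);
  assert(v[1] == "a");
  // Inserting multiple copies of an element of v is similarly safe now.
  v.insert(v.begin(), 2, v.back());
  assert(v[0] == "c" && v[1] == "c");
  return 0;
}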


@ -14,6 +14,7 @@
#include "absl/container/inlined_vector.h"
#include <algorithm>
#include <forward_list>
#include <list>
#include <memory>
@ -569,6 +570,16 @@ TEST(IntVec, CopyConstructorAndAssignment) {
}
}
TEST(IntVec, AliasingCopyAssignment) {
for (int len = 0; len < 20; ++len) {
IntVec original;
Fill(&original, len);
IntVec dup = original;
dup = dup;
EXPECT_EQ(dup, original);
}
}
TEST(IntVec, MoveConstructorAndAssignment) {
for (int len = 0; len < 20; len++) {
IntVec v_in;
@ -606,6 +617,78 @@ TEST(IntVec, MoveConstructorAndAssignment) {
}
}
class NotTriviallyDestructible {
public:
NotTriviallyDestructible() : p_(new int(1)) {}
explicit NotTriviallyDestructible(int i) : p_(new int(i)) {}
NotTriviallyDestructible(const NotTriviallyDestructible& other)
: p_(new int(*other.p_)) {}
NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) {
p_ = absl::make_unique<int>(*other.p_);
return *this;
}
bool operator==(const NotTriviallyDestructible& other) const {
return *p_ == *other.p_;
}
private:
std::unique_ptr<int> p_;
};
TEST(AliasingTest, Emplace) {
for (int i = 2; i < 20; ++i) {
absl::InlinedVector<NotTriviallyDestructible, 10> vec;
for (int j = 0; j < i; ++j) {
vec.push_back(NotTriviallyDestructible(j));
}
vec.emplace(vec.begin(), vec[0]);
EXPECT_EQ(vec[0], vec[1]);
vec.emplace(vec.begin() + i / 2, vec[i / 2]);
EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]);
vec.emplace(vec.end() - 1, vec.back());
EXPECT_EQ(vec[vec.size() - 2], vec.back());
}
}
TEST(AliasingTest, InsertWithCount) {
for (int i = 1; i < 20; ++i) {
absl::InlinedVector<NotTriviallyDestructible, 10> vec;
for (int j = 0; j < i; ++j) {
vec.push_back(NotTriviallyDestructible(j));
}
for (int n = 0; n < 5; ++n) {
// We use back where we can because it's guaranteed to become invalidated
vec.insert(vec.begin(), n, vec.back());
auto b = vec.begin();
EXPECT_TRUE(
std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) {
return x == vec.back();
}));
auto m_idx = vec.size() / 2;
vec.insert(vec.begin() + m_idx, n, vec.back());
auto m = vec.begin() + m_idx;
EXPECT_TRUE(
std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) {
return x == vec.back();
}));
// We want distinct values so the equality test is meaningful,
// vec[vec.size() - 1] is also almost always invalidated.
auto old_e = vec.size() - 1;
auto val = vec[old_e];
vec.insert(vec.end(), n, vec[old_e]);
auto e = vec.begin() + old_e;
EXPECT_TRUE(std::all_of(
e, e + n,
[&val](const NotTriviallyDestructible& x) { return x == val; }));
}
}
}
TEST(OverheadTest, Storage) {
// Check for size overhead.
// In particular, ensure that std::allocator doesn't cost anything to store.


@ -75,8 +75,9 @@ const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
} // namespace
const void *const ElfMemImage::kInvalidBase =
reinterpret_cast<const void *>(~0L);
// The value of this variable doesn't matter; it's used only for its
// unique address.
const int ElfMemImage::kInvalidBaseSentinel = 0;
ElfMemImage::ElfMemImage(const void *base) {
ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");


@ -43,9 +43,14 @@ namespace debug_internal {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
private:
// Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
static const int kInvalidBaseSentinel;
public:
// Sentinel: there could never be an elf image at this address.
static const void *const kInvalidBase;
static constexpr const void *const kInvalidBase =
static_cast<const void*>(&kInvalidBaseSentinel);
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
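
A standalone sketch of the sentinel-address idiom introduced with kInvalidBaseSentinel above, using made-up names: a constexpr "impossible" pointer backed by the address of an otherwise unused variable.

#include <cassert>

class Registry {
 private:
  // Value never used; only its unique address matters.
  static const int kInvalidHandleSentinel;

 public:
  // A constexpr sentinel pointer, usable in constant expressions.
  static constexpr const void* kInvalidHandle =
      static_cast<const void*>(&kInvalidHandleSentinel);
};

const int Registry::kInvalidHandleSentinel = 0;
constexpr const void* Registry::kInvalidHandle;  // out-of-line definition (C++11/14)

int main() {
  int real_object = 42;
  assert(Registry::kInvalidHandle != &real_object);  // can never collide
  return 0;
}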


@ -114,7 +114,9 @@ static const int kMaxFrameBytes = 100000;
// vuc is a ucontext_t *. We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
#if defined(__linux__)
#if !defined(__linux__)
static_cast<void>(vuc); // Avoid an unused argument compiler warning.
#else
if (vuc != nullptr) {
auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)


@ -20,10 +20,15 @@
#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
#include <errno.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>
#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval.
#include <sys/auxv.h>
#endif
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"
@ -35,8 +40,10 @@
namespace absl {
namespace debug_internal {
ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
debug_internal::ElfMemImage::kInvalidBase);
std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
@ -56,37 +63,44 @@ VDSOSupport::VDSOSupport()
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
if (vdso_base_.load(std::memory_order_relaxed) ==
debug_internal::ElfMemImage::kInvalidBase) {
{
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
// on stack, and so glibc works as if VDSO was not present.
// But going directly to kernel via /proc/self/auxv below bypasses
// Valgrind zapping. So we check for Valgrind separately.
if (RunningOnValgrind()) {
vdso_base_.store(nullptr, std::memory_order_relaxed);
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
return nullptr;
}
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
// Kernel too old to have a VDSO.
vdso_base_.store(nullptr, std::memory_order_relaxed);
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
return nullptr;
}
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
std::memory_order_relaxed);
break;
}
}
close(fd);
const auto kInvalidBase = debug_internal::ElfMemImage::kInvalidBase;
#if __GLIBC_PREREQ(2, 16)
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
errno = 0;
const void *const sysinfo_ehdr =
reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
if (errno == 0) {
vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
}
if (vdso_base_.load(std::memory_order_relaxed) ==
debug_internal::ElfMemImage::kInvalidBase) {
}
#endif // __GLIBC_PREREQ(2, 16)
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
// on stack, and so glibc works as if VDSO was not present.
// But going directly to kernel via /proc/self/auxv below bypasses
// Valgrind zapping. So we check for Valgrind separately.
if (RunningOnValgrind()) {
vdso_base_.store(nullptr, std::memory_order_relaxed);
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
return nullptr;
}
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
// Kernel too old to have a VDSO.
vdso_base_.store(nullptr, std::memory_order_relaxed);
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
return nullptr;
}
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
std::memory_order_relaxed);
break;
}
}
close(fd);
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_.store(nullptr, std::memory_order_relaxed);
}
@ -135,6 +149,7 @@ long VDSOSupport::GetCPUViaSyscall(unsigned *cpu, // NOLINT(runtime/int)
return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
// x86_64 never implemented sys_getcpu(), except as a VDSO call.
static_cast<void>(cpu); // Avoid an unused argument compiler warning.
errno = ENOSYS;
return -1;
#endif
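
For context, a standalone sketch of the getauxval() path the new code prefers over parsing /proc/self/auxv, assuming glibc 2.16+ and a kernel that maps a vDSO:

#include <sys/auxv.h>
#include <cerrno>
#include <cstdio>

int main() {
  errno = 0;
  // AT_SYSINFO_EHDR is the address of the vDSO's ELF header, or 0 if absent.
  unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
  if (vdso != 0 && errno == 0) {
    std::printf("vDSO ELF header at 0x%lx\n", vdso);
  } else {
    std::printf("no vDSO reported by getauxval()\n");
  }
  return 0;
}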


@ -38,8 +38,8 @@ namespace absl {
// Function Template: WrapUnique()
// -----------------------------------------------------------------------------
//
// Transfers ownership of a raw pointer to a `std::unique_ptr`. The returned
// value is a `std::unique_ptr` of deduced type.
// Adopts ownership from a raw pointer and transfers it to the returned
// `std::unique_ptr`, whose type is deduced.
//
// Example:
// X* NewX(int, int);
@ -81,6 +81,9 @@ struct MakeUniqueResult<T[N]> {
} // namespace memory_internal
#if __cplusplus >= 201402L || defined(_MSC_VER)
using std::make_unique;
#else
// -----------------------------------------------------------------------------
// Function Template: make_unique<T>()
// -----------------------------------------------------------------------------
@ -164,13 +167,14 @@ typename memory_internal::MakeUniqueResult<T>::array make_unique(size_t n) {
template <typename T, typename... Args>
typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
Args&&... /* args */) = delete;
#endif
// -----------------------------------------------------------------------------
// Function Template: RawPtr()
// -----------------------------------------------------------------------------
//
// Extracts the raw pointer from a pointer-like 'ptr'. `absl::RawPtr` is useful
// within templates that need to handle a complement of raw pointers,
// Extracts the raw pointer from a pointer-like value `ptr`. `absl::RawPtr` is
// useful within templates that need to handle a complement of raw pointers,
// `std::nullptr_t`, and smart pointers.
template <typename T>
auto RawPtr(T&& ptr) -> decltype(&*ptr) {
@ -183,9 +187,9 @@ inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
// Function Template: ShareUniquePtr()
// -----------------------------------------------------------------------------
//
// Transforms a `std::unique_ptr` rvalue into a `std::shared_ptr`. The returned
// value is a `std::shared_ptr` of deduced type and ownership is transferred to
// the shared pointer.
// Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced
// type. Ownership (if any) of the held value is transferred to the returned
// shared pointer.
//
// Example:
//
@ -194,8 +198,11 @@ inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
// CHECK_EQ(*sp, 10);
// CHECK(up == nullptr);
//
// Note that this conversion is correct even when T is an array type, although
// the resulting shared pointer may not be very useful.
// Note that this conversion is correct even when T is an array type, and more
// generally it works for *any* deleter of the `unique_ptr` (single-object
// deleter, array deleter, or any custom deleter), since the deleter is adopted
// by the shared pointer as well. The deleter is copied (unless it is a
// reference).
//
// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a
// null shared pointer does not attempt to call the deleter.
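
A brief usage sketch (not from the diff) of the deleter-preserving behavior the updated comment describes; the FILE-closing lambda is illustrative:

#include <cassert>
#include <cstdio>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"

int main() {
  auto closer = [](std::FILE* f) { if (f != nullptr) std::fclose(f); };
  std::unique_ptr<std::FILE, decltype(closer)> up(std::fopen("/dev/null", "r"),
                                                  closer);
  // The shared_ptr adopts both the pointer and the custom deleter.
  std::shared_ptr<std::FILE> sp = absl::ShareUniquePtr(std::move(up));
  assert(up == nullptr);
  return 0;
}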


@ -138,6 +138,16 @@ TEST(Make_UniqueTest, Array) {
EXPECT_THAT(ArrayWatch::allocs(), ElementsAre(5 * sizeof(ArrayWatch)));
}
TEST(Make_UniqueTest, NotAmbiguousWithStdMakeUnique) {
// Ensure that absl::make_unique is not ambiguous with std::make_unique.
// In C++14 mode, the below call to make_unique has both types as candidates.
struct TakesStdType {
explicit TakesStdType(const std::vector<int> &vec) {}
};
using absl::make_unique;
make_unique<TakesStdType>(std::vector<int>());
}
#if 0
// TODO(billydonahue): Make a proper NC test.
// These tests shouldn't compile.


@ -103,6 +103,7 @@ cc_test(
size = "small",
srcs = ["match_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
@ -117,6 +118,7 @@ cc_test(
"internal/escaping_test_common.inc",
],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -130,6 +132,7 @@ cc_test(
size = "small",
srcs = ["ascii_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -145,6 +148,7 @@ cc_test(
"internal/memutil_test.cc",
],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -159,6 +163,7 @@ cc_test(
"internal/utf8_test.cc",
],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":internal",
":strings",
@ -172,6 +177,7 @@ cc_test(
size = "small",
srcs = ["string_view_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:config",
@ -186,6 +192,7 @@ cc_test(
size = "small",
srcs = ["substitute_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -198,6 +205,7 @@ cc_test(
size = "small",
srcs = ["str_replace_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
@ -208,6 +216,7 @@ cc_test(
name = "str_split_test",
srcs = ["str_split_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -221,6 +230,7 @@ cc_test(
size = "small",
srcs = ["internal/ostringstream_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":internal",
"@com_google_googletest//:gtest_main",
@ -235,6 +245,7 @@ cc_test(
"internal/resize_uninitialized_test.cc",
],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
@ -247,6 +258,7 @@ cc_test(
size = "small",
srcs = ["str_join_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -260,6 +272,7 @@ cc_test(
size = "small",
srcs = ["str_cat_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@ -278,6 +291,7 @@ cc_test(
tags = [
"no_test_loonix",
],
visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base",
@ -291,6 +305,7 @@ cc_test(
size = "small",
srcs = ["strip_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",


@ -25,7 +25,6 @@
#include <cstddef>
#include <cstdint>
namespace absl {
namespace strings_internal {


@ -53,7 +53,7 @@ inline bool StrContains(absl::string_view haystack, absl::string_view needle) {
inline bool StartsWith(absl::string_view text, absl::string_view prefix) {
return prefix.empty() ||
(text.size() >= prefix.size() &&
memcmp(text.data(), prefix.data(), prefix.size()) == 0);
memcmp(text.data(), prefix.data(), prefix.size()) == 0);
}
// EndsWith()
@ -63,7 +63,8 @@ inline bool EndsWith(absl::string_view text, absl::string_view suffix) {
return suffix.empty() ||
(text.size() >= suffix.size() &&
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
suffix.size()) == 0);
suffix.size()) == 0
);
}
// StartsWithIgnoreCase()


@ -62,9 +62,9 @@ ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str, double* value);
// SimpleAtob()
//
// Converts the given std::string into into a boolean, returning `true` if
// successful. The following case-insensitive strings are interpreted as boolean
// `true`: "true", "t", "yes", "y", "1". The following case-insensitive strings
// Converts the given std::string into a boolean, returning `true` if successful.
// The following case-insensitive strings are interpreted as boolean `true`:
// "true", "t", "yes", "y", "1". The following case-insensitive strings
// are interpreted as boolean `false`: "false", "f", "no", "n", "0".
ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* value);
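
For example, a minimal sketch of calling the declaration above (assuming absl/strings/numbers.h):

#include <cassert>
#include "absl/strings/numbers.h"

int main() {
  bool b = false;
  assert(absl::SimpleAtob("YES", &b) && b == true);   // case-insensitive true
  assert(absl::SimpleAtob("0", &b) && b == false);    // "0" parses as false
  assert(!absl::SimpleAtob("maybe", &b));             // unrecognized -> failure
  return 0;
}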


@ -295,9 +295,8 @@ class string_view {
// string_view::remove_prefix()
//
// Removes the first `n` characters from the `string_view`, returning a
// pointer to the new first character. Note that the underlying std::string is not
// changed, only the view.
// Removes the first `n` characters from the `string_view`. Note that the
// underlying std::string is not changed, only the view.
void remove_prefix(size_type n) {
assert(n <= length_);
ptr_ += n;
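
A short usage sketch (not part of this change) of the behavior the reworded comment describes:

#include <cassert>
#include <string>
#include "absl/strings/string_view.h"

int main() {
  std::string s = "hello world";
  absl::string_view v(s);
  v.remove_prefix(6);            // the view now starts at "world"
  assert(v == "world");
  assert(s == "hello world");    // the underlying std::string is unchanged
  return 0;
}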


@ -922,6 +922,10 @@ TEST(StringViewTest, ConstexprCompiles) {
constexpr absl::string_view::iterator const_begin_empty = sp.begin();
constexpr absl::string_view::iterator const_end_empty = sp.end();
EXPECT_EQ(const_begin_empty, const_end_empty);
constexpr absl::string_view::iterator const_begin_nullptr = cstr.begin();
constexpr absl::string_view::iterator const_end_nullptr = cstr.end();
EXPECT_EQ(const_begin_nullptr, const_end_nullptr);
#endif
constexpr absl::string_view::iterator const_begin = cstr_len.begin();


@ -34,6 +34,9 @@ cc_library(
"internal/graphcycles.h",
],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
"//absl/base",
"//absl/base:core_headers",


@ -89,8 +89,6 @@ static void CheckSumG0G1(void *v) {
}
static void TestMu(TestContext *cxt, int c) {
SetInvariantChecked(false);
cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
absl::MutexLock l(&cxt->mu);
int a = cxt->g0 + 1;
@ -100,8 +98,6 @@ static void TestMu(TestContext *cxt, int c) {
}
static void TestTry(TestContext *cxt, int c) {
SetInvariantChecked(false);
cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
do {
std::this_thread::yield();
@ -122,8 +118,6 @@ static void TestR20ms(TestContext *cxt, int c) {
}
static void TestRW(TestContext *cxt, int c) {
SetInvariantChecked(false);
cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
if ((c & 1) == 0) {
for (int i = 0; i != cxt->iterations; i++) {
absl::WriterMutexLock l(&cxt->mu);
@ -356,68 +350,58 @@ static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
cv->Signal();
}
// Basis for the parameterized tests configured below.
static int RunTest(void (*test)(TestContext *cxt, int), int threads,
int iterations, int operations) {
TestContext cxt;
// Code common to RunTest() and RunTestWithInvariantDebugging().
static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
int threads, int iterations, int operations) {
absl::Mutex mu2;
absl::CondVar cv2;
int c0;
int c1;
// run with large thread count for full test and to get timing
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
absl::EnableMutexInvariantDebugging(false);
#endif
c0 = 0;
c1 = 0;
cxt.g0 = 0;
cxt.g1 = 0;
cxt.iterations = iterations;
cxt.threads = threads;
int c0 = 0;
int c1 = 0;
cxt->g0 = 0;
cxt->g1 = 0;
cxt->iterations = iterations;
cxt->threads = threads;
absl::synchronization_internal::ThreadPool tp(threads);
for (int i = 0; i != threads; i++) {
tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
std::function<void(int)>(
std::bind(test, &cxt, std::placeholders::_1))));
std::bind(test, cxt, std::placeholders::_1))));
}
mu2.Lock();
while (c1 != threads) {
cv2.Wait(&mu2);
}
mu2.Unlock();
int saved_g0 = cxt.g0;
// run again with small number of iterations to test invariant checking
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
absl::EnableMutexInvariantDebugging(true);
#endif
SetInvariantChecked(true);
c0 = 0;
c1 = 0;
cxt.g0 = 0;
cxt.g1 = 0;
cxt.iterations = (iterations > 10 ? 10 : iterations);
cxt.threads = threads;
for (int i = 0; i != threads; i++) {
tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
std::function<void(int)>(
std::bind(test, &cxt, std::placeholders::_1))));
}
mu2.Lock();
while (c1 != threads) {
cv2.Wait(&mu2);
}
mu2.Unlock();
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
#endif
return saved_g0;
return cxt->g0;
}
// Basis for the parameterized tests configured below.
static int RunTest(void (*test)(TestContext *cxt, int), int threads,
int iterations, int operations) {
TestContext cxt;
return RunTestCommon(&cxt, test, threads, iterations, operations);
}
// Like RunTest(), but sets an invariant on the tested Mutex and
// verifies that the invariant check happened. The invariant function
// will be passed the TestContext* as its arg and must call
// SetInvariantChecked(true);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
int threads, int iterations,
int operations,
void (*invariant)(void *)) {
absl::EnableMutexInvariantDebugging(true);
SetInvariantChecked(false);
TestContext cxt;
cxt.mu.EnableInvariantDebugging(invariant, &cxt);
int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
absl::EnableMutexInvariantDebugging(false); // Restore.
return ret;
}
#endif
// --------------------------------------------------------
// Test for fix of bug in TryRemove()
struct TimeoutBugStruct {
@ -1463,6 +1447,13 @@ TEST_P(MutexVariableThreadCountTest, Mutex) {
int iterations = ScaleIterations(10000000) / threads;
int operations = threads * iterations;
EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
operations, CheckSumG0G1),
operations);
#endif
}
TEST_P(MutexVariableThreadCountTest, Try) {
@ -1470,6 +1461,13 @@ TEST_P(MutexVariableThreadCountTest, Try) {
int iterations = 1000000 / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
operations, CheckSumG0G1),
operations);
#endif
}
TEST_P(MutexVariableThreadCountTest, R20ms) {
@ -1484,6 +1482,13 @@ TEST_P(MutexVariableThreadCountTest, RW) {
int iterations = ScaleIterations(20000000) / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
iterations = std::min(iterations, 10);
operations = threads * iterations;
EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
operations, CheckSumG0G1),
operations / 2);
#endif
}
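
Outside these tests, the invariant machinery exercised by RunTestWithInvariantDebugging() can be used directly; a minimal sketch under assumed usage (illustrative names; the check only fires in builds where invariant debugging is compiled in and enabled):

#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"

struct Counters {
  absl::Mutex mu;
  int a = 0;
  int b = 0;  // invariant: a == b whenever mu is free
};

static void CheckCounters(void* arg) {
  auto* c = static_cast<Counters*>(arg);
  ABSL_RAW_CHECK(c->a == c->b, "Counters invariant violated");
}

int main() {
  absl::EnableMutexInvariantDebugging(true);  // global opt-in (debug aid)
  Counters c;
  c.mu.EnableInvariantDebugging(&CheckCounters, &c);
  {
    absl::MutexLock l(&c.mu);
    ++c.a;
    ++c.b;  // invariant restored before the lock is released
  }
  return 0;
}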
TEST_P(MutexVariableThreadCountTest, Await) {


@ -57,6 +57,9 @@ cc_library(
],
hdrs = ["internal/test_util.h"],
copts = ABSL_DEFAULT_COPTS,
visibility = [
"//absl/time:__pkg__",
],
deps = [
":time",
"//absl/base",


@ -63,7 +63,7 @@ const struct ZoneInfo {
{"US/Pacific", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
// Allows use of the local time zone from a common system-specific location.
// Allows use of the local time zone from a system-specific location.
#ifdef _MSC_VER
{"localtime", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},


@ -1126,8 +1126,10 @@ constexpr Duration OppositeInfinity(Duration d) {
: MakeDuration(std::numeric_limits<int64_t>::min(), ~0U);
}
// Returns (-n)-1 (equivalently -(n+1)) without overflowing on any input value.
// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
constexpr int64_t NegateAndSubtractOne(int64_t n) {
// Note: Good compilers will optimize this expression to ~n when using
// a two's-complement representation (which is required for int64_t).
return (n < 0) ? -(n + 1) : (-n) - 1;
}
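
A quick self-contained check (not part of the diff) of the identity the new note refers to: on two's-complement int64_t, (-n)-1 equals ~n for every n, including the minimum value, where plain negation would overflow.

#include <cassert>
#include <cstdint>
#include <limits>

constexpr int64_t NegateAndSubtractOne(int64_t n) {
  return (n < 0) ? -(n + 1) : (-n) - 1;  // same expression as above
}

int main() {
  constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
  constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
  static_assert(NegateAndSubtractOne(0) == -1, "");
  static_assert(NegateAndSubtractOne(kMin) == kMax, "");  // no overflow
  assert(NegateAndSubtractOne(42) == ~int64_t{42});
  return 0;
}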
@ -1232,31 +1234,26 @@ constexpr bool operator==(Duration lhs, Duration rhs) {
constexpr Duration operator-(Duration d) {
// This is a little interesting because of the special cases.
//
// Infinities stay infinite, and just change direction.
// If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're
// dealing with an integral number of seconds, and the only special case is
// the maximum negative finite duration, which can't be negated.
//
// The maximum negative finite duration can't be negated (at least, not
// on a two's complement machine), so we return infinity for that case.
// Next we dispatch the case where rep_lo_ is zero, observing that it's
// safe to negate rep_hi_ in this case because it's not int64_t-min (or
// else we'd have handled it above, returning InfiniteDuration()).
// Infinities stay infinite, and just change direction.
//
// Finally we're in the case where rep_lo_ is non-zero, and we can borrow
// a second's worth of ticks and avoid overflow (as negating int64_t-min + 1
// is safe).
return time_internal::IsInfiniteDuration(d)
? time_internal::OppositeInfinity(d)
: (time_internal::GetRepHi(d) ==
std::numeric_limits<int64_t>::min() &&
time_internal::GetRepLo(d) == 0)
return time_internal::GetRepLo(d) == 0
? time_internal::GetRepHi(d) == std::numeric_limits<int64_t>::min()
? InfiniteDuration()
: (time_internal::GetRepLo(d) == 0)
? time_internal::MakeDuration(
-time_internal::GetRepHi(d))
: time_internal::MakeDuration(
time_internal::NegateAndSubtractOne(
time_internal::GetRepHi(d)),
time_internal::kTicksPerSecond -
time_internal::GetRepLo(d));
: time_internal::MakeDuration(-time_internal::GetRepHi(d))
: time_internal::IsInfiniteDuration(d)
? time_internal::OppositeInfinity(d)
: time_internal::MakeDuration(
time_internal::NegateAndSubtractOne(
time_internal::GetRepHi(d)),
time_internal::kTicksPerSecond -
time_internal::GetRepLo(d));
}
constexpr Duration Nanoseconds(int64_t n) {


@ -94,23 +94,20 @@ namespace absl {
namespace any_internal {
// FastTypeId<Type>() evaluates at compile/link-time to a unique integer for the
// passed in type. Their values are neither contiguous nor small, making them
// unfit for using as an index into a vector, but a good match for keys into
// maps or straight up comparisons.
// Note that on 64-bit (unix) systems size_t is 64-bit while int is 32-bit and
// the compiler will happily and quietly assign such a 64-bit value to a
// 32-bit integer. While a client should never do that it SHOULD still be safe,
// assuming the BSS segment doesn't span more than 4GiB.
template<typename Type>
inline size_t FastTypeId() {
static_assert(sizeof(char*) <= sizeof(size_t),
"ptr size too large for size_t");
template <typename Type>
struct TypeTag {
constexpr static char dummy_var = 0;
};
// This static variable isn't actually used, only its address, so there are
// no concurrency issues.
static char dummy_var;
return reinterpret_cast<size_t>(&dummy_var);
template <typename Type>
constexpr char TypeTag<Type>::dummy_var;
// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
// passed in type. These are meant to be good match for keys into maps or
// straight up comparisons.
template<typename Type>
constexpr inline const void* FastTypeId() {
return &TypeTag<Type>::dummy_var;
}
} // namespace any_internal
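
The tag-address technique described in the new comment can be sketched standalone (illustrative names, C++11 assumed):

#include <cassert>

template <typename Type>
struct TypeTag {
  static constexpr char dummy_var = 0;  // value unused; address identifies Type
};
template <typename Type>
constexpr char TypeTag<Type>::dummy_var;

template <typename Type>
constexpr const void* FastTypeId() {
  return &TypeTag<Type>::dummy_var;
}

int main() {
  assert(FastTypeId<int>() == FastTypeId<int>());   // same type, same id
  assert(FastTypeId<int>() != FastTypeId<long>());  // distinct types, distinct ids
  return 0;
}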
@ -382,7 +379,7 @@ class any {
public:
virtual ~ObjInterface() = default;
virtual std::unique_ptr<ObjInterface> Clone() const = 0;
virtual size_t type_id() const noexcept = 0;
virtual const void* ObjTypeId() const noexcept = 0;
#if ABSL_ANY_DETAIL_HAS_RTTI
virtual const std::type_info& Type() const noexcept = 0;
#endif // ABSL_ANY_DETAIL_HAS_RTTI
@ -400,7 +397,7 @@ class any {
return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
}
size_t type_id() const noexcept final { return IdForType<T>(); }
const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
#if ABSL_ANY_DETAIL_HAS_RTTI
const std::type_info& Type() const noexcept final { return typeid(T); }
@ -415,7 +412,7 @@ class any {
}
template <typename T>
static size_t IdForType() {
constexpr static const void* IdForType() {
// Note: This type dance is to make the behavior consistent with typeid.
using NormalizedType =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
@ -423,8 +420,8 @@ class any {
return any_internal::FastTypeId<NormalizedType>();
}
size_t GetObjTypeId() const {
return obj_ == nullptr ? any_internal::FastTypeId<void>() : obj_->type_id();
const void* GetObjTypeId() const {
return obj_ ? obj_->ObjTypeId() : any_internal::FastTypeId<void>();
}
// `absl::any` nonmember functions //


@ -270,8 +270,17 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<const TrivialCopyable>>::value);
#endif
// When testing with VS 2017 15.3, there seems to be a bug in MSVC
// std::optional when T is volatile-qualified. So skipping this test.
// Bug report:
// https://connect.microsoft.com/VisualStudio/feedback/details/3142534
#if defined(ABSL_HAVE_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911
#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1
#endif
#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG
EXPECT_FALSE(std::is_copy_constructible<
absl::optional<volatile TrivialCopyable>>::value);
#endif
}
}


@ -378,7 +378,7 @@ class Span {
//
// Returns a reference to the i'th element of this span.
constexpr reference at(size_type i) const {
return ABSL_PREDICT_FALSE(i < size())
return ABSL_PREDICT_TRUE(i < size())
? ptr_[i]
: (base_internal::ThrowStdOutOfRange(
"Span::at failed bounds check"),