merge(3p/absl): subtree merge of Abseil up to e19260f

... notably, this includes Abseil's own StatusOr type, which
conflicted with our implementation (which was taken from TensorFlow).

Change-Id: Ie7d6764b64055caaeb8dc7b6b9d066291e6b538f
Vincent Ambo 2020-11-21 14:43:54 +01:00
parent cc27324d02
commit 082c006c04
854 changed files with 11260 additions and 5296 deletions
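
For context on the StatusOr conflict mentioned in the commit message, here is a minimal sketch (not part of this commit) of how the newly vendored absl::StatusOr is typically used. ParseNumber and the calling code are hypothetical, written only to illustrate the type:

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"

// Hypothetical helper: returns either a parsed value or an error status.
absl::StatusOr<int> ParseNumber(const std::string& s) {
  int value = 0;
  if (!absl::SimpleAtoi(s, &value)) {
    return absl::InvalidArgumentError("not a number: " + s);
  }
  return value;
}

int main() {
  absl::StatusOr<int> n = ParseNumber("42");
  if (n.ok()) {
    std::cout << "parsed: " << *n << "\n";
  } else {
    std::cout << "error: " << n.status() << "\n";
  }
  return 0;
}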

View file

@ -0,0 +1,41 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''
---
**Describe the bug**
Include a clear and concise description of what the problem is, including what
you expected to happen, and what actually happened.
**Steps to reproduce the bug**
It's important that we are able to reproduce the problem that you are
experiencing. Please provide all code and relevant steps to reproduce the
problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links
to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the
problem are also helpful.
**What version of Abseil are you using?**
**What operating system and version are you using?**
If you are using a Linux distribution please include the name and version of the
distribution as well.
**What compiler and version are you using?**
Please include the output of `gcc -v` or `clang -v`, or the equivalent for your
compiler.
**What build system are you using?**
Please include the output of `bazel --version` or `cmake --version`, or the
equivalent for your build system.
**Additional context**
Add any other context about the problem here.

View file

@ -0,0 +1,7 @@
---
name: Question
about: Have a question? Ask us anything! :-)
title: ''
labels: 'question'
assignees: ''
---

View file

@ -0,0 +1 @@
blank_issues_enabled: true

View file

@ -8,7 +8,6 @@ set(ABSL_INTERNAL_DLL_FILES
"base/casts.h" "base/casts.h"
"base/config.h" "base/config.h"
"base/const_init.h" "base/const_init.h"
"base/dynamic_annotations.cc"
"base/dynamic_annotations.h" "base/dynamic_annotations.h"
"base/internal/atomic_hook.h" "base/internal/atomic_hook.h"
"base/internal/bits.h" "base/internal/bits.h"
@ -139,7 +138,6 @@ set(ABSL_INTERNAL_DLL_FILES
"random/internal/distribution_caller.h" "random/internal/distribution_caller.h"
"random/internal/fastmath.h" "random/internal/fastmath.h"
"random/internal/fast_uniform_bits.h" "random/internal/fast_uniform_bits.h"
"random/internal/gaussian_distribution_gentables.cc"
"random/internal/generate_real.h" "random/internal/generate_real.h"
"random/internal/iostream_state_saver.h" "random/internal/iostream_state_saver.h"
"random/internal/mock_helpers.h" "random/internal/mock_helpers.h"
@ -176,8 +174,12 @@ set(ABSL_INTERNAL_DLL_FILES
"random/uniform_int_distribution.h" "random/uniform_int_distribution.h"
"random/uniform_real_distribution.h" "random/uniform_real_distribution.h"
"random/zipf_distribution.h" "random/zipf_distribution.h"
"status/internal/status_internal.h"
"status/internal/statusor_internal.h"
"status/status.h" "status/status.h"
"status/status.cc" "status/status.cc"
"status/statusor.h"
"status/statusor.cc"
"status/status_payload_printer.h" "status/status_payload_printer.h"
"status/status_payload_printer.cc" "status/status_payload_printer.cc"
"strings/ascii.cc" "strings/ascii.cc"
@ -194,6 +196,7 @@ set(ABSL_INTERNAL_DLL_FILES
"strings/internal/charconv_parse.cc" "strings/internal/charconv_parse.cc"
"strings/internal/charconv_parse.h" "strings/internal/charconv_parse.h"
"strings/internal/stl_type_traits.h" "strings/internal/stl_type_traits.h"
"strings/internal/string_constant.h"
"strings/match.cc" "strings/match.cc"
"strings/match.h" "strings/match.h"
"strings/numbers.cc" "strings/numbers.cc"
@ -248,6 +251,7 @@ set(ABSL_INTERNAL_DLL_FILES
"synchronization/notification.h" "synchronization/notification.h"
"synchronization/internal/create_thread_identity.cc" "synchronization/internal/create_thread_identity.cc"
"synchronization/internal/create_thread_identity.h" "synchronization/internal/create_thread_identity.h"
"synchronization/internal/futex.h"
"synchronization/internal/graphcycles.cc" "synchronization/internal/graphcycles.cc"
"synchronization/internal/graphcycles.h" "synchronization/internal/graphcycles.h"
"synchronization/internal/kernel_timeout.h" "synchronization/internal/kernel_timeout.h"

View file

@ -23,7 +23,9 @@ include(AbseilInstallDirs)
# project that sets # project that sets
# set_property(GLOBAL PROPERTY USE_FOLDERS ON) # set_property(GLOBAL PROPERTY USE_FOLDERS ON)
# For example, Visual Studio supports folders. # For example, Visual Studio supports folders.
set(ABSL_IDE_FOLDER Abseil) if(NOT DEFINED ABSL_IDE_FOLDER)
set(ABSL_IDE_FOLDER Abseil)
endif()
# absl_cc_library() # absl_cc_library()
# #
@ -120,7 +122,11 @@ function(absl_cc_library)
# 4. "static" -- This target does not depend on the DLL and should be built # 4. "static" -- This target does not depend on the DLL and should be built
# statically. # statically.
if (${ABSL_BUILD_DLL}) if (${ABSL_BUILD_DLL})
absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll) if(ABSL_ENABLE_INSTALL)
absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
else()
absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll)
endif()
if (${_in_dll}) if (${_in_dll})
# This target should be replaced by the DLL # This target should be replaced by the DLL
set(_build_type "dll") set(_build_type "dll")
@ -135,6 +141,47 @@ function(absl_cc_library)
set(_build_type "static") set(_build_type "static")
endif() endif()
# Generate a pkg-config file for every library:
if(${_build_type} STREQUAL "static" OR ${_build_type} STREQUAL "shared")
if(NOT ABSL_CC_LIB_TESTONLY)
if(absl_VERSION)
set(PC_VERSION "${absl_VERSION}")
else()
set(PC_VERSION "head")
endif()
foreach(dep ${ABSL_CC_LIB_DEPS})
if(${dep} MATCHES "^absl::(.*)")
set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}")
endif()
endforeach()
foreach(cflag ${ABSL_CC_LIB_COPTS})
if(${cflag} MATCHES "^(-Wno|/wd)")
# These flags are needed to suppress warnings that might fire in our headers.
set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
# Don't impose our warnings on others.
else()
set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
endif()
endforeach()
FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\
prefix=${CMAKE_INSTALL_PREFIX}\n\
exec_prefix=\${prefix}\n\
libdir=\${prefix}/lib\n\
includedir=\${prefix}/include\n\
\n\
Name: absl_${_NAME}\n\
Description: Abseil ${_NAME} library\n\
URL: https://abseil.io/\n\
Version: ${PC_VERSION}\n\
Requires.private:${PC_DEPS}\n\
Libs: -L\${libdir} $<JOIN:${ABSL_CC_LIB_LINKOPTS}, > $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-labsl_${_NAME}>\n\
Cflags: -I\${includedir}${PC_CFLAGS}\n")
INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/pkgconfig")
endif()
endif()
if(NOT ABSL_CC_LIB_IS_INTERFACE) if(NOT ABSL_CC_LIB_IS_INTERFACE)
if(${_build_type} STREQUAL "dll_dep") if(${_build_type} STREQUAL "dll_dep")
# This target depends on the DLL. When adding dependencies to this target, # This target depends on the DLL. When adding dependencies to this target,
@ -213,6 +260,8 @@ function(absl_cc_library)
if(ABSL_ENABLE_INSTALL) if(ABSL_ENABLE_INSTALL)
set_target_properties(${_NAME} PROPERTIES set_target_properties(${_NAME} PROPERTIES
OUTPUT_NAME "absl_${_NAME}" OUTPUT_NAME "absl_${_NAME}"
# TODO(b/173696973): Figure out how to set SOVERSION for LTS releases.
SOVERSION 0
) )
endif() endif()
else() else()

View file

@ -10,7 +10,7 @@ if(absl_VERSION)
set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}") set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}")
set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}")
set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}") set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}")
set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/{ABSL_SUBDIR}") set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}")
set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}")
else() else()
set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}") set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}")

View file

@ -3,24 +3,12 @@ cmake_minimum_required(VERSION 2.8.2)
project(googletest-external NONE) project(googletest-external NONE)
include(ExternalProject) include(ExternalProject)
if(${ABSL_USE_GOOGLETEST_HEAD}) ExternalProject_Add(googletest
ExternalProject_Add(googletest URL "${absl_gtest_download_url}" # May be empty
GIT_REPOSITORY https://github.com/google/googletest.git SOURCE_DIR "${absl_gtest_src_dir}"
GIT_TAG master BINARY_DIR "${absl_gtest_build_dir}"
SOURCE_DIR "${absl_gtest_src_dir}" CONFIGURE_COMMAND ""
BINARY_DIR "${absl_gtest_build_dir}" BUILD_COMMAND ""
CONFIGURE_COMMAND "" INSTALL_COMMAND ""
BUILD_COMMAND "" TEST_COMMAND ""
INSTALL_COMMAND "" )
TEST_COMMAND ""
)
else()
ExternalProject_Add(googletest
SOURCE_DIR "${absl_gtest_src_dir}"
BINARY_DIR "${absl_gtest_build_dir}"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()

View file

@ -118,6 +118,24 @@ if ! grep absl::strings "${libdir}/cmake/${absl_subdir}/abslTargets.cmake"; then
exit 1 exit 1
fi fi
pushd "${HOME}"
cat > hello-abseil.cc << EOF
#include <cstdlib>
#include "absl/strings/str_format.h"
int main(int argc, char **argv) {
absl::PrintF("Hello Abseil!\n");
return EXIT_SUCCESS;
}
EOF
export PKG_CONFIG_PATH="${install_dir}/${libdir}/pkgconfig"
pc_args=($(pkg-config --cflags --libs --static absl_str_format))
g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}"
hello="$(./hello-abseil)"
[[ "${hello}" == "Hello Abseil!" ]]
popd
uninstall_absl uninstall_absl
popd popd

View file

@ -22,13 +22,24 @@
cmake_minimum_required(VERSION 3.5) cmake_minimum_required(VERSION 3.5)
# Compiler id for Apple Clang is now AppleClang. # Compiler id for Apple Clang is now AppleClang.
cmake_policy(SET CMP0025 NEW) if (POLICY CMP0025)
cmake_policy(SET CMP0025 NEW)
endif (POLICY CMP0025)
# if command can use IN_LIST # if command can use IN_LIST
cmake_policy(SET CMP0057 NEW) if (POLICY CMP0057)
cmake_policy(SET CMP0057 NEW)
endif (POLICY CMP0057)
# Project version variables are the empty string if version is unspecified # Project version variables are the empty string if version is unspecified
cmake_policy(SET CMP0048 NEW) if (POLICY CMP0048)
cmake_policy(SET CMP0048 NEW)
endif (POLICY CMP0048)
# option() honor variables
if (POLICY CMP0077)
cmake_policy(SET CMP0077 NEW)
endif (POLICY CMP0077)
project(absl CXX) project(absl CXX)
@ -41,9 +52,9 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
# when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp)) # when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp))
# in the source tree of a project that uses it, install rules are disabled. # in the source tree of a project that uses it, install rules are disabled.
if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$") if(NOT "^${CMAKE_SOURCE_DIR}$" STREQUAL "^${PROJECT_SOURCE_DIR}$")
set(ABSL_ENABLE_INSTALL FALSE) option(ABSL_ENABLE_INSTALL "Enable install rule" OFF)
else() else()
set(ABSL_ENABLE_INSTALL TRUE) option(ABSL_ENABLE_INSTALL "Enable install rule" ON)
endif() endif()
list(APPEND CMAKE_MODULE_PATH list(APPEND CMAKE_MODULE_PATH
@ -87,12 +98,13 @@ find_package(Threads REQUIRED)
option(ABSL_USE_EXTERNAL_GOOGLETEST option(ABSL_USE_EXTERNAL_GOOGLETEST
"If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF) "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF)
option(ABSL_USE_GOOGLETEST_HEAD option(ABSL_USE_GOOGLETEST_HEAD
"If ON, abseil will download HEAD from googletest at config time." OFF) "If ON, abseil will download HEAD from GoogleTest at config time." OFF)
set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL")
set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
"If ABSL_USE_GOOGLETEST_HEAD is OFF, specifies the directory of a local googletest checkout." "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
) )
option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF) option(ABSL_RUN_TESTS "If ON, Abseil tests will be run." OFF)
@ -101,12 +113,19 @@ if(${ABSL_RUN_TESTS})
# enable CTest. This will set BUILD_TESTING to ON unless otherwise specified # enable CTest. This will set BUILD_TESTING to ON unless otherwise specified
# on the command line # on the command line
include(CTest) include(CTest)
enable_testing()
## check targets ## check targets
if (NOT ABSL_USE_EXTERNAL_GOOGLETEST) if (NOT ABSL_USE_EXTERNAL_GOOGLETEST)
set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build) set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build)
if(${ABSL_USE_GOOGLETEST_HEAD}) if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL)
message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL")
endif()
if(ABSL_USE_GOOGLETEST_HEAD)
set(absl_gtest_download_url "https://github.com/google/googletest/archive/master.zip")
elseif(ABSL_GOOGLETEST_DOWNLOAD_URL)
set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL})
endif()
if(absl_gtest_download_url)
set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src) set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src)
else() else()
set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR}) set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR})

View file

@ -20,9 +20,10 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# GoogleTest/GoogleMock framework. Used by most unit-tests. # GoogleTest/GoogleMock framework. Used by most unit-tests.
http_archive( http_archive(
name = "com_google_googletest", name = "com_google_googletest",
urls = ["https://github.com/google/googletest/archive/011959aafddcd30611003de96cfd8d7a7685c700.zip"], # 2020-05-14T00:36:05Z # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh.
strip_prefix = "googletest-011959aafddcd30611003de96cfd8d7a7685c700", urls = ["https://github.com/google/googletest/archive/8567b09290fe402cf01923e2131c5635b8ed851b.zip"], # 2020-06-12T22:24:28Z
sha256 = "6a5d7d63cd6e0ad2a7130471105a3b83799a7a2b14ef7ec8d742b54f01a4833c", strip_prefix = "googletest-8567b09290fe402cf01923e2131c5635b8ed851b",
sha256 = "9a8a166eb6a56c7b3d7b19dc2c946fe4778fd6f21c7a12368ad3b836d8f1be48",
) )
# Google benchmark. # Google benchmark.
@ -39,7 +40,6 @@ http_archive(
sha256 = "9a446e9dd9c1bb180c86977a8dc1e9e659550ae732ae58bd2e8fd51e15b2c91d", sha256 = "9a446e9dd9c1bb180c86977a8dc1e9e659550ae732ae58bd2e8fd51e15b2c91d",
strip_prefix = "rules_cc-262ebec3c2296296526740db4aefce68c80de7fa", strip_prefix = "rules_cc-262ebec3c2296296526740db4aefce68c80de7fa",
urls = [ urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip",
"https://github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip", "https://github.com/bazelbuild/rules_cc/archive/262ebec3c2296296526740db4aefce68c80de7fa.zip",
], ],
) )

View file

@ -12,19 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
load(
":compiler_config_setting.bzl",
"create_llvm_config",
)
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0 licenses(["notice"])
create_llvm_config( config_setting(
name = "llvm_compiler", name = "clang_compiler",
flag_values = {
"@bazel_tools//tools/cpp:compiler": "clang",
},
visibility = [":__subpackages__"], visibility = [":__subpackages__"],
) )
@ -58,3 +55,11 @@ config_setting(
}, },
visibility = [":__subpackages__"], visibility = [":__subpackages__"],
) )
config_setting(
name = "wasm",
values = {
"cpu": "wasm32",
},
visibility = [":__subpackages__"],
)

View file

@ -40,8 +40,8 @@ Pod::Spec.new do |s|
'USE_HEADERMAP' => 'NO', 'USE_HEADERMAP' => 'NO',
'ALWAYS_SEARCH_USER_PATHS' => 'NO', 'ALWAYS_SEARCH_USER_PATHS' => 'NO',
} }
s.ios.deployment_target = '7.0' s.ios.deployment_target = '9.0'
s.osx.deployment_target = '10.9' s.osx.deployment_target = '10.10'
s.tvos.deployment_target = '9.0' s.tvos.deployment_target = '9.0'
s.watchos.deployment_target = '2.0' s.watchos.deployment_target = '2.0'
""" """

View file

@ -24,7 +24,7 @@ load(
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0 licenses(["notice"])
cc_library( cc_library(
name = "algorithm", name = "algorithm",

View file

@ -90,10 +90,10 @@ using ContainerPointerType =
// lookup of std::begin and std::end, i.e. // lookup of std::begin and std::end, i.e.
// using std::begin; // using std::begin;
// using std::end; // using std::end;
// std::foo(begin(c), end(c); // std::foo(begin(c), end(c));
// becomes // becomes
// std::foo(container_algorithm_internal::begin(c), // std::foo(container_algorithm_internal::begin(c),
// container_algorithm_internal::end(c)); // container_algorithm_internal::end(c));
// These are meant for internal use only. // These are meant for internal use only.
template <typename C> template <typename C>
@ -188,7 +188,7 @@ bool c_any_of(const C& c, Pred&& pred) {
// c_none_of() // c_none_of()
// //
// Container-based version of the <algorithm> `std::none_of()` function to // Container-based version of the <algorithm> `std::none_of()` function to
// test if no elements in a container fulfil a condition. // test if no elements in a container fulfill a condition.
template <typename C, typename Pred> template <typename C, typename Pred>
bool c_none_of(const C& c, Pred&& pred) { bool c_none_of(const C& c, Pred&& pred) {
return std::none_of(container_algorithm_internal::c_begin(c), return std::none_of(container_algorithm_internal::c_begin(c),
@ -340,24 +340,45 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
// c_mismatch() // c_mismatch()
// //
// Container-based version of the <algorithm> `std::mismatch()` function to // Container-based version of the <algorithm> `std::mismatch()` function to
// return the first element where two ordered containers differ. // return the first element where two ordered containers differ. Applies `==` to
// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
template <typename C1, typename C2> template <typename C1, typename C2>
container_algorithm_internal::ContainerIterPairType<C1, C2> container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2) { c_mismatch(C1& c1, C2& c2) {
return std::mismatch(container_algorithm_internal::c_begin(c1), auto first1 = container_algorithm_internal::c_begin(c1);
container_algorithm_internal::c_end(c1), auto last1 = container_algorithm_internal::c_end(c1);
container_algorithm_internal::c_begin(c2)); auto first2 = container_algorithm_internal::c_begin(c2);
auto last2 = container_algorithm_internal::c_end(c2);
for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
// Negates equality because Cpp17EqualityComparable doesn't require clients
// to overload both `operator==` and `operator!=`.
if (!(*first1 == *first2)) {
break;
}
}
return std::make_pair(first1, first2);
} }
// Overload of c_mismatch() for using a predicate evaluation other than `==` as // Overload of c_mismatch() for using a predicate evaluation other than `==` as
// the function's test condition. // the function's test condition. Applies `pred`to the first N elements of `c1`
// and `c2`, where N = min(size(c1), size(c2)).
template <typename C1, typename C2, typename BinaryPredicate> template <typename C1, typename C2, typename BinaryPredicate>
container_algorithm_internal::ContainerIterPairType<C1, C2> container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2, BinaryPredicate&& pred) { c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) {
return std::mismatch(container_algorithm_internal::c_begin(c1), auto first1 = container_algorithm_internal::c_begin(c1);
container_algorithm_internal::c_end(c1), auto last1 = container_algorithm_internal::c_end(c1);
container_algorithm_internal::c_begin(c2), auto first2 = container_algorithm_internal::c_begin(c2);
std::forward<BinaryPredicate>(pred)); auto last2 = container_algorithm_internal::c_end(c2);
for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
if (!pred(*first1, *first2)) {
break;
}
}
return std::make_pair(first1, first2);
} }
// c_equal() // c_equal()
@ -539,12 +560,20 @@ BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
// c_swap_ranges() // c_swap_ranges()
// //
// Container-based version of the <algorithm> `std::swap_ranges()` function to // Container-based version of the <algorithm> `std::swap_ranges()` function to
// swap a container's elements with another container's elements. // swap a container's elements with another container's elements. Swaps the
// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
template <typename C1, typename C2> template <typename C1, typename C2>
container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) { container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
return std::swap_ranges(container_algorithm_internal::c_begin(c1), auto first1 = container_algorithm_internal::c_begin(c1);
container_algorithm_internal::c_end(c1), auto last1 = container_algorithm_internal::c_end(c1);
container_algorithm_internal::c_begin(c2)); auto first2 = container_algorithm_internal::c_begin(c2);
auto last2 = container_algorithm_internal::c_end(c2);
using std::swap;
for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
swap(*first1, *first2);
}
return first2;
} }
// c_transform() // c_transform()
@ -562,16 +591,23 @@ OutputIterator c_transform(const InputSequence& input, OutputIterator output,
} }
// Overload of c_transform() for performing a transformation using a binary // Overload of c_transform() for performing a transformation using a binary
// predicate. // predicate. Applies `binary_op` to the first N elements of `c1` and `c2`,
// where N = min(size(c1), size(c2)).
template <typename InputSequence1, typename InputSequence2, template <typename InputSequence1, typename InputSequence2,
typename OutputIterator, typename BinaryOp> typename OutputIterator, typename BinaryOp>
OutputIterator c_transform(const InputSequence1& input1, OutputIterator c_transform(const InputSequence1& input1,
const InputSequence2& input2, OutputIterator output, const InputSequence2& input2, OutputIterator output,
BinaryOp&& binary_op) { BinaryOp&& binary_op) {
return std::transform(container_algorithm_internal::c_begin(input1), auto first1 = container_algorithm_internal::c_begin(input1);
container_algorithm_internal::c_end(input1), auto last1 = container_algorithm_internal::c_end(input1);
container_algorithm_internal::c_begin(input2), output, auto first2 = container_algorithm_internal::c_begin(input2);
std::forward<BinaryOp>(binary_op)); auto last2 = container_algorithm_internal::c_end(input2);
for (; first1 != last1 && first2 != last2;
++first1, (void)++first2, ++output) {
*output = binary_op(*first1, *first2);
}
return output;
} }
// c_replace() // c_replace()
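
To illustrate the semantics documented in the hunk above (c_mismatch, c_swap_ranges, and the binary c_transform now operate on only the first N elements, where N = min(size(c1), size(c2))), a hedged sketch that is not part of the diff:

#include <iostream>
#include <iterator>
#include <list>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  std::vector<int> longer = {1, 2, 3, 4};
  std::list<int> shorter = {1, 2, 3};  // equal prefix, then runs out

  // With the reimplemented c_mismatch this stops safely at the shorter range:
  // result.first == longer.begin() + 3, result.second == shorter.end().
  // The previous std::mismatch-based version had undefined behavior here.
  auto result = absl::c_mismatch(longer, shorter);
  std::cout << std::distance(longer.begin(), result.first) << "\n";  // prints 3
  return 0;
}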

View file

@ -57,9 +57,7 @@ class NonMutatingTest : public testing::Test {
}; };
struct AccumulateCalls { struct AccumulateCalls {
void operator()(int value) { void operator()(int value) { calls.push_back(value); }
calls.push_back(value);
}
std::vector<int> calls; std::vector<int> calls;
}; };
@ -68,7 +66,6 @@ bool BinPredicate(int v1, int v2) { return v1 < v2; }
bool Equals(int v1, int v2) { return v1 == v2; } bool Equals(int v1, int v2) { return v1 == v2; }
bool IsOdd(int x) { return x % 2 != 0; } bool IsOdd(int x) { return x % 2 != 0; }
TEST_F(NonMutatingTest, Distance) { TEST_F(NonMutatingTest, Distance) {
EXPECT_EQ(container_.size(), absl::c_distance(container_)); EXPECT_EQ(container_.size(), absl::c_distance(container_));
EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_)); EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_));
@ -151,13 +148,90 @@ TEST_F(NonMutatingTest, CountIf) {
} }
TEST_F(NonMutatingTest, Mismatch) { TEST_F(NonMutatingTest, Mismatch) {
absl::c_mismatch(container_, sequence_); // Testing necessary as absl::c_mismatch executes logic.
absl::c_mismatch(sequence_, container_); {
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, vector_.end());
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, vector_.end());
}
sequence_.back() = 5;
{
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, std::prev(vector_.end()));
EXPECT_EQ(result.second, std::prev(sequence_.end()));
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, std::prev(sequence_.end()));
EXPECT_EQ(result.second, std::prev(vector_.end()));
}
sequence_.pop_back();
{
auto result = absl::c_mismatch(vector_, sequence_);
EXPECT_EQ(result.first, std::prev(vector_.end()));
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, std::prev(vector_.end()));
}
{
struct NoNotEquals {
constexpr bool operator==(NoNotEquals) const { return true; }
constexpr bool operator!=(NoNotEquals) const = delete;
};
std::vector<NoNotEquals> first;
std::list<NoNotEquals> second;
// Check this still compiles.
absl::c_mismatch(first, second);
}
} }
TEST_F(NonMutatingTest, MismatchWithPredicate) { TEST_F(NonMutatingTest, MismatchWithPredicate) {
absl::c_mismatch(container_, sequence_, BinPredicate); // Testing necessary as absl::c_mismatch executes logic.
absl::c_mismatch(sequence_, container_, BinPredicate); {
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.begin());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, sequence_.begin());
EXPECT_EQ(result.second, vector_.begin());
}
sequence_.front() = 0;
{
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.begin());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, std::next(sequence_.begin()));
EXPECT_EQ(result.second, std::next(vector_.begin()));
}
sequence_.clear();
{
auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
EXPECT_EQ(result.first, vector_.begin());
EXPECT_EQ(result.second, sequence_.end());
}
{
auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
EXPECT_EQ(result.first, sequence_.end());
EXPECT_EQ(result.second, vector_.begin());
}
} }
TEST_F(NonMutatingTest, Equal) { TEST_F(NonMutatingTest, Equal) {
@ -519,11 +593,9 @@ TEST_F(SortingTest, IsSortedUntil) {
TEST_F(SortingTest, NthElement) { TEST_F(SortingTest, NthElement) {
std::vector<int> unsorted = {2, 4, 1, 3}; std::vector<int> unsorted = {2, 4, 1, 3};
absl::c_nth_element(unsorted, unsorted.begin() + 2); absl::c_nth_element(unsorted, unsorted.begin() + 2);
EXPECT_THAT(unsorted, EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>()); absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>());
EXPECT_THAT(unsorted, EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
} }
TEST(MutatingTest, IsPartitioned) { TEST(MutatingTest, IsPartitioned) {
@ -676,6 +748,15 @@ TEST(MutatingTest, SwapRanges) {
absl::c_swap_ranges(odds, evens); absl::c_swap_ranges(odds, evens);
EXPECT_THAT(odds, ElementsAre(1, 3, 5)); EXPECT_THAT(odds, ElementsAre(1, 3, 5));
EXPECT_THAT(evens, ElementsAre(2, 4, 6)); EXPECT_THAT(evens, ElementsAre(2, 4, 6));
odds.pop_back();
absl::c_swap_ranges(odds, evens);
EXPECT_THAT(odds, ElementsAre(2, 4));
EXPECT_THAT(evens, ElementsAre(1, 3, 6));
absl::c_swap_ranges(evens, odds);
EXPECT_THAT(odds, ElementsAre(1, 3));
EXPECT_THAT(evens, ElementsAre(2, 4, 6));
} }
TEST_F(NonMutatingTest, Transform) { TEST_F(NonMutatingTest, Transform) {
@ -690,6 +771,20 @@ TEST_F(NonMutatingTest, Transform) {
EXPECT_EQ(std::vector<int>({1, 5, 4}), z); EXPECT_EQ(std::vector<int>({1, 5, 4}), z);
*end = 7; *end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z); EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z);
z.clear();
y.pop_back();
end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
EXPECT_EQ(std::vector<int>({1, 5}), z);
*end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
z.clear();
std::swap(x, y);
end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
EXPECT_EQ(std::vector<int>({1, 5}), z);
*end = 7;
EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
} }
TEST(MutatingTest, Replace) { TEST(MutatingTest, Replace) {
@ -755,10 +850,9 @@ MATCHER_P2(IsElement, key, value, "") {
TEST(MutatingTest, StableSort) { TEST(MutatingTest, StableSort) {
std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}}; std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
absl::c_stable_sort(test_vector); absl::c_stable_sort(test_vector);
EXPECT_THAT( EXPECT_THAT(test_vector,
test_vector, ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1),
ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1), IsElement(2, 0), IsElement(2, 2)));
IsElement(2, 0), IsElement(2, 2)));
} }
TEST(MutatingTest, StableSortWithPredicate) { TEST(MutatingTest, StableSortWithPredicate) {
@ -766,10 +860,9 @@ TEST(MutatingTest, StableSortWithPredicate) {
absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) { absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) {
return e2 < e1; return e2 < e1;
}); });
EXPECT_THAT( EXPECT_THAT(test_vector,
test_vector, ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2),
ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2), IsElement(1, 1), IsElement(1, 0)));
IsElement(1, 1), IsElement(1, 0)));
} }
TEST(MutatingTest, ReplaceCopyIf) { TEST(MutatingTest, ReplaceCopyIf) {

View file

@ -24,7 +24,7 @@ load(
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0 licenses(["notice"])
cc_library( cc_library(
name = "atomic_hook", name = "atomic_hook",
@ -116,7 +116,6 @@ cc_library(
cc_library( cc_library(
name = "dynamic_annotations", name = "dynamic_annotations",
srcs = [ srcs = [
"dynamic_annotations.cc",
"internal/dynamic_annotations.h", "internal/dynamic_annotations.h",
], ],
hdrs = [ hdrs = [
@ -126,6 +125,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
":config", ":config",
":core_headers",
], ],
) )
@ -161,6 +161,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS, copts = ABSL_DEFAULT_COPTS,
linkopts = select({ linkopts = select({
"//absl:windows": [], "//absl:windows": [],
"//absl:wasm": [],
"//conditions:default": ["-pthread"], "//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS, }) + ABSL_DEFAULT_LINKOPTS,
visibility = [ visibility = [
@ -222,6 +223,7 @@ cc_library(
"//absl:windows": [ "//absl:windows": [
"-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:advapi32.lib",
], ],
"//absl:wasm": [],
"//conditions:default": ["-pthread"], "//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS, }) + ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
@ -413,6 +415,7 @@ cc_library(
deps = [ deps = [
":base", ":base",
":base_internal", ":base_internal",
":config",
":core_headers", ":core_headers",
"//absl/synchronization", "//absl/synchronization",
"@com_google_googletest//:gtest", "@com_google_googletest//:gtest",
@ -429,6 +432,7 @@ cc_test(
deps = [ deps = [
":base", ":base",
":base_internal", ":base_internal",
":config",
":core_headers", ":core_headers",
"//absl/synchronization", "//absl/synchronization",
"@com_google_googletest//:gtest_main", "@com_google_googletest//:gtest_main",

View file

@ -105,7 +105,6 @@ absl_cc_library(
HDRS HDRS
"dynamic_annotations.h" "dynamic_annotations.h"
SRCS SRCS
"dynamic_annotations.cc"
"internal/dynamic_annotations.h" "internal/dynamic_annotations.h"
COPTS COPTS
${ABSL_DEFAULT_COPTS} ${ABSL_DEFAULT_COPTS}
@ -385,6 +384,7 @@ absl_cc_library(
${ABSL_TEST_COPTS} ${ABSL_TEST_COPTS}
DEPS DEPS
absl::base absl::base
absl::config
absl::base_internal absl::base_internal
absl::core_headers absl::core_headers
absl::synchronization absl::synchronization
@ -403,6 +403,7 @@ absl_cc_test(
DEPS DEPS
absl::base absl::base
absl::base_internal absl::base_internal
absl::config
absl::core_headers absl::core_headers
absl::synchronization absl::synchronization
gtest_main gtest_main

View file

@ -32,34 +32,12 @@
// of them are not supported in older version of Clang. Thus, we check // of them are not supported in older version of Clang. Thus, we check
// `__has_attribute()` first. If the check fails, we check if we are on GCC and // `__has_attribute()` first. If the check fails, we check if we are on GCC and
// assume the attribute exists on GCC (which is verified on GCC 4.7). // assume the attribute exists on GCC (which is verified on GCC 4.7).
//
// -----------------------------------------------------------------------------
// Sanitizer Attributes
// -----------------------------------------------------------------------------
//
// Sanitizer-related attributes are not "defined" in this file (and indeed
// are not defined as such in any file). To utilize the following
// sanitizer-related attributes within your builds, define the following macros
// within your build using a `-D` flag, along with the given value for
// `-fsanitize`:
//
// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8)
// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only)
// * `THREAD_SANITIZER` + `-fsanitize=thread` (Clang, GCC 4.8+)
// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+)
// * `CONTROL_FLOW_INTEGRITY` + `-fsanitize=cfi` (Clang-only)
//
// Example:
//
// // Enable branches in the Abseil code that are tagged for ASan:
// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address
// --linkopt=-fsanitize=address *target*
//
// Since these macro names are only supported by GCC and Clang, we only check
// for `__GNUC__` (GCC or Clang) and the above macros.
#ifndef ABSL_BASE_ATTRIBUTES_H_ #ifndef ABSL_BASE_ATTRIBUTES_H_
#define ABSL_BASE_ATTRIBUTES_H_ #define ABSL_BASE_ATTRIBUTES_H_
#include "absl/base/config.h"
// ABSL_HAVE_ATTRIBUTE // ABSL_HAVE_ATTRIBUTE
// //
// A function-like feature checking macro that is a wrapper around // A function-like feature checking macro that is a wrapper around
@ -234,7 +212,7 @@
// out of bounds or does other scary things with memory. // out of bounds or does other scary things with memory.
// NOTE: GCC supports AddressSanitizer(asan) since 4.8. // NOTE: GCC supports AddressSanitizer(asan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html // https://gcc.gnu.org/gcc-4.8/changes.html
#if defined(__GNUC__) #if ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
@ -242,13 +220,13 @@
// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
// //
// Tells the MemorySanitizer to relax the handling of a given function. All // Tells the MemorySanitizer to relax the handling of a given function. All "Use
// "Use of uninitialized value" warnings from such functions will be suppressed, // of uninitialized value" warnings from such functions will be suppressed, and
// and all values loaded from memory will be considered fully initialized. // all values loaded from memory will be considered fully initialized. This
// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals // attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute
// with initialized-ness rather than addressability issues. // above, but deals with initialized-ness rather than addressability issues.
// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. // NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
#if defined(__clang__) #if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory)
#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
@ -259,7 +237,7 @@
// Tells the ThreadSanitizer to not instrument a given function. // Tells the ThreadSanitizer to not instrument a given function.
// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. // NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html // https://gcc.gnu.org/gcc-4.8/changes.html
#if defined(__GNUC__) #if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread)
#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
@ -271,8 +249,10 @@
// where certain behavior (eg. division by zero) is being used intentionally. // where certain behavior (eg. division by zero) is being used intentionally.
// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. // NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
// https://gcc.gnu.org/gcc-4.9/changes.html // https://gcc.gnu.org/gcc-4.9/changes.html
#if defined(__GNUC__) && \ #if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined)
(defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER)) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
__attribute__((no_sanitize_undefined))
#elif ABSL_HAVE_ATTRIBUTE(no_sanitize)
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
__attribute__((no_sanitize("undefined"))) __attribute__((no_sanitize("undefined")))
#else #else
@ -283,7 +263,7 @@
// //
// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. // Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. // See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY) #if ABSL_HAVE_ATTRIBUTE(no_sanitize)
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
@ -293,7 +273,7 @@
// //
// Tells the SafeStack to not instrument a given function. // Tells the SafeStack to not instrument a given function.
// See https://clang.llvm.org/docs/SafeStack.html for details. // See https://clang.llvm.org/docs/SafeStack.html for details.
#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER) #if ABSL_HAVE_ATTRIBUTE(no_sanitize)
#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
__attribute__((no_sanitize("safe-stack"))) __attribute__((no_sanitize("safe-stack")))
#else #else
@ -594,6 +574,86 @@
#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) #define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes)
#endif #endif
// ABSL_FALLTHROUGH_INTENDED
//
// Annotates implicit fall-through between switch labels, allowing a case to
// indicate intentional fallthrough and turn off warnings about any lack of a
// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
// a semicolon and can be used in most places where `break` can, provided that
// no statements exist between it and the next switch label.
//
// Example:
//
// switch (x) {
// case 40:
// case 41:
// if (truth_is_out_there) {
// ++x;
// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations
// // in comments
// } else {
// return x;
// }
// case 42:
// ...
//
// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed
// when performing switch labels fall-through diagnostic
// (`-Wimplicit-fallthrough`). See clang documentation on language extensions
// for details:
// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
//
// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
// has no effect on diagnostics. In any case this macro has no effect on runtime
// behavior and performance of code.
#ifdef ABSL_FALLTHROUGH_INTENDED
#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
#endif
// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
#if defined(__clang__) && defined(__has_warning)
#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
#endif
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
#endif
#ifndef ABSL_FALLTHROUGH_INTENDED
#define ABSL_FALLTHROUGH_INTENDED \
do { \
} while (0)
#endif
// ABSL_DEPRECATED()
//
// Marks a deprecated class, struct, enum, function, method and variable
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
// Examples:
//
// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
//
// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
//
// template <typename T>
// ABSL_DEPRECATED("Use DoThat() instead")
// void DoThis();
//
// Every usage of a deprecated entity will trigger a warning when compiled with
// clang's `-Wdeprecated-declarations` option. This option is turned off by
// default, but the warnings will be reported by clang-tidy.
#if defined(__clang__) && __cplusplus >= 201103L
#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
#endif
#ifndef ABSL_DEPRECATED
#define ABSL_DEPRECATED(message)
#endif
// ABSL_CONST_INIT // ABSL_CONST_INIT
// //
// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will // A variable declaration annotated with the `ABSL_CONST_INIT` attribute will

View file

@ -175,7 +175,7 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
std::memory_order_relaxed) || std::memory_order_relaxed) ||
base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
scheduling_mode) == kOnceInit) { scheduling_mode) == kOnceInit) {
base_internal::Invoke(std::forward<Callable>(fn), base_internal::invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...); std::forward<Args>(args)...);
// The call to SpinLockWake below is an optimization, because the waiter // The call to SpinLockWake below is an optimization, because the waiter
// in SpinLockWait is waiting with a short timeout. The atomic load/store // in SpinLockWait is waiting with a short timeout. The atomic load/store

View file

@ -159,16 +159,19 @@ inline Dest bit_cast(const Source& source) {
return dest; return dest;
} }
// NOTE: This overload is only picked if the requirements of bit_cast are not // NOTE: This overload is only picked if the requirements of bit_cast are
// met. It is therefore UB, but is provided temporarily as previous versions of // not met. It is therefore UB, but is provided temporarily as previous
// this function template were unchecked. Do not use this in new code. // versions of this function template were unchecked. Do not use this in
// new code.
template < template <
typename Dest, typename Source, typename Dest, typename Source,
typename std::enable_if< typename std::enable_if<
!internal_casts::is_bitcastable<Dest, Source>::value, int>::type = 0> !internal_casts::is_bitcastable<Dest, Source>::value,
int>::type = 0>
ABSL_DEPRECATED( ABSL_DEPRECATED(
"absl::bit_cast type requirements were violated. Update the types being " "absl::bit_cast type requirements were violated. Update the types "
"used such that they are the same size and are both TriviallyCopyable.") "being used such that they are the same size and are both "
"TriviallyCopyable.")
inline Dest bit_cast(const Source& source) { inline Dest bit_cast(const Source& source) {
static_assert(sizeof(Dest) == sizeof(Source), static_assert(sizeof(Dest) == sizeof(Source),
"Source and destination types should have equal sizes."); "Source and destination types should have equal sizes.");

View file

@ -154,6 +154,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_INTERNAL_HAS_KEYWORD(x) 0 #define ABSL_INTERNAL_HAS_KEYWORD(x) 0
#endif #endif
#ifdef __has_feature
#define ABSL_HAVE_FEATURE(f) __has_feature(f)
#else
#define ABSL_HAVE_FEATURE(f) 0
#endif
// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. // ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
// We assume __thread is supported on Linux when compiled with Clang or compiled // We assume __thread is supported on Linux when compiled with Clang or compiled
// against libstdc++ with _GLIBCXX_HAVE_TLS defined. // against libstdc++ with _GLIBCXX_HAVE_TLS defined.
@ -226,11 +232,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator // * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
// targeting iOS 9.x. // targeting iOS 9.x.
// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time // * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
// making __has_feature unreliable there. // making ABSL_HAVE_FEATURE unreliable there.
// //
// Otherwise, `__has_feature` is only supported by Clang so it has be inside #if ABSL_HAVE_FEATURE(cxx_thread_local) && \
// `defined(__APPLE__)` check.
#if __has_feature(cxx_thread_local) && \
!(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
#define ABSL_HAVE_THREAD_LOCAL 1 #define ABSL_HAVE_THREAD_LOCAL 1
#endif #endif
@ -312,15 +316,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) #if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
// Clang >= 3.6 // Clang >= 3.6
#if __has_feature(cxx_exceptions) #if ABSL_HAVE_FEATURE(cxx_exceptions)
#define ABSL_HAVE_EXCEPTIONS 1 #define ABSL_HAVE_EXCEPTIONS 1
#endif // __has_feature(cxx_exceptions) #endif // ABSL_HAVE_FEATURE(cxx_exceptions)
#else #else
// Clang < 3.6 // Clang < 3.6
// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro // http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) #if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
#define ABSL_HAVE_EXCEPTIONS 1 #define ABSL_HAVE_EXCEPTIONS 1
#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) #endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
#endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) #endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
// Handle remaining special cases and default to exceptions being supported. // Handle remaining special cases and default to exceptions being supported.
@ -360,7 +364,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
defined(__ASYLO__) defined(__ASYLO__) || defined(__myriad2__)
#define ABSL_HAVE_MMAP 1 #define ABSL_HAVE_MMAP 1
#endif #endif
@ -470,9 +474,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
(defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
(defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000)) __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else #else
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@ -661,4 +665,50 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_DLL #define ABSL_DLL
#endif // defined(_MSC_VER) #endif // defined(_MSC_VER)
// ABSL_HAVE_MEMORY_SANITIZER
//
// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of
// a compiler instrumentation module and a run-time library.
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
#elif defined(MEMORY_SANITIZER)
// The MEMORY_SANITIZER macro is deprecated but we will continue to honor it
// for now.
#define ABSL_HAVE_MEMORY_SANITIZER 1
#elif defined(__SANITIZE_MEMORY__)
#define ABSL_HAVE_MEMORY_SANITIZER 1
#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
#define ABSL_HAVE_MEMORY_SANITIZER 1
#endif
// ABSL_HAVE_THREAD_SANITIZER
//
// ThreadSanitizer (TSan) is a fast data race detector.
#ifdef ABSL_HAVE_THREAD_SANITIZER
#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set."
#elif defined(THREAD_SANITIZER)
// The THREAD_SANITIZER macro is deprecated but we will continue to honor it
// for now.
#define ABSL_HAVE_THREAD_SANITIZER 1
#elif defined(__SANITIZE_THREAD__)
#define ABSL_HAVE_THREAD_SANITIZER 1
#elif ABSL_HAVE_FEATURE(thread_sanitizer)
#define ABSL_HAVE_THREAD_SANITIZER 1
#endif
// ABSL_HAVE_ADDRESS_SANITIZER
//
// AddressSanitizer (ASan) is a fast memory error detector.
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set."
#elif defined(ADDRESS_SANITIZER)
// The ADDRESS_SANITIZER macro is deprecated but we will continue to honor it
// for now.
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#elif defined(__SANITIZE_ADDRESS__)
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#elif ABSL_HAVE_FEATURE(address_sanitizer)
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
#endif // ABSL_BASE_CONFIG_H_ #endif // ABSL_BASE_CONFIG_H_
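
As a hedged illustration (not part of the diff) of how downstream code can key off the new ABSL_HAVE_*_SANITIZER macros added above, instead of the deprecated ADDRESS_SANITIZER / MEMORY_SANITIZER / THREAD_SANITIZER spellings; SanitizerFlavor is a hypothetical function:

#include "absl/base/config.h"

// Reports which sanitizer, if any, this translation unit was built with,
// using the ABSL_HAVE_*_SANITIZER macros defined in absl/base/config.h.
const char* SanitizerFlavor() {
#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
  return "asan";
#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
  return "msan";
#elif defined(ABSL_HAVE_THREAD_SANITIZER)
  return "tsan";
#else
  return "none";
#endif
}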

View file

@ -1,72 +0,0 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <string.h>
#include "absl/base/dynamic_annotations.h"
// Compiler-based ThreadSanitizer defines
// DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
// and provides its own definitions of the functions.
#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
#endif
#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)
extern "C" {
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
// See the comments in dynamic_annotations.h
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
// C doesn't have thread-safe initialization of statics, and we
// don't want to depend on pthread_once here, so hack it.
ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
// See the comments in dynamic_annotations.h
double ValgrindSlowdown(void) {
// Same initialization hack as in RunningOnValgrind().
static volatile double slowdown = 0.0;
double local_slowdown = slowdown;
ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
if (RunningOnValgrind() == 0) {
return 1.0;
}
if (local_slowdown == 0.0) {
char *env = getenv("VALGRIND_SLOWDOWN");
slowdown = local_slowdown = env ? atof(env) : 50.0;
}
return local_slowdown;
}
} // extern "C"
#endif // DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0

View file

@ -47,25 +47,19 @@
#include <stddef.h> #include <stddef.h>
#include "absl/base/attributes.h"
#include "absl/base/config.h" #include "absl/base/config.h"
#ifdef __cplusplus
#include "absl/base/macros.h"
#endif
// TODO(rogeeff): Remove after the backward compatibility period. // TODO(rogeeff): Remove after the backward compatibility period.
#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export #include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Decide which features are enabled // Decide which features are enabled.
#ifndef DYNAMIC_ANNOTATIONS_ENABLED #ifdef ABSL_HAVE_THREAD_SANITIZER
#define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if defined(__clang__) && !defined(SWIG)
#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
#else
#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
@ -85,25 +79,20 @@
// will issue a warning, if these attributes are compiled. Only include them // will issue a warning, if these attributes are compiled. Only include them
// when compiling using Clang. // when compiling using Clang.
// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 #if defined(__clang__)
#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED #if !defined(SWIG)
#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
#endif
#else
#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
#endif
// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. // Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
ABSL_INTERNAL_ANNOTALYSIS_ENABLED ABSL_INTERNAL_ANNOTALYSIS_ENABLED
#endif
// Memory annotations are also made available to LLVM's Memory Sanitizer #endif // ABSL_HAVE_THREAD_SANITIZER
#if defined(MEMORY_SANITIZER) && defined(__has_feature) && \
!defined(__native_client__)
#if __has_feature(memory_sanitizer)
#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
#endif
#endif
#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
#endif
#ifdef __cplusplus #ifdef __cplusplus
#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { #define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
@ -165,7 +154,7 @@
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
// Report that a linker initialized lock has been created at address `lock`. // Report that a linker initialized lock has been created at address `lock`.
#ifdef THREAD_SANITIZER #ifdef ABSL_HAVE_THREAD_SANITIZER
#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock) (__FILE__, __LINE__, lock)
@ -243,7 +232,7 @@ ABSL_INTERNAL_END_EXTERN_C
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Define memory annotations. // Define memory annotations.
#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 #ifdef ABSL_HAVE_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h> #include <sanitizer/msan_interface.h>
@ -253,9 +242,10 @@ ABSL_INTERNAL_END_EXTERN_C
#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
__msan_allocated_memory(address, size) __msan_allocated_memory(address, size)
#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 #else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
#if DYNAMIC_ANNOTATIONS_ENABLED == 1 // TODO(rogeeff): remove this branch
#ifdef ABSL_HAVE_THREAD_SANITIZER
#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
do { \ do { \
(void)(address); \ (void)(address); \
@ -273,24 +263,24 @@ ABSL_INTERNAL_END_EXTERN_C
#endif #endif
#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED #endif // ABSL_HAVE_MEMORY_SANITIZER
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Define IGNORE_READS_BEGIN/_END attributes. // Define IGNORE_READS_BEGIN/_END attributes.
#if ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED == 1 #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
__attribute((exclusive_lock_function("*"))) __attribute((exclusive_lock_function("*")))
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
__attribute((unlock_function("*"))) __attribute((unlock_function("*")))
#else // ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED == 0 #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
#endif // ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED #endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Define IGNORE_READS_BEGIN/_END annotations. // Define IGNORE_READS_BEGIN/_END annotations.
@ -429,46 +419,35 @@ ABSL_NAMESPACE_END
#endif #endif
#ifdef __cplusplus
#ifdef ABSL_HAVE_THREAD_SANITIZER
ABSL_INTERNAL_BEGIN_EXTERN_C ABSL_INTERNAL_BEGIN_EXTERN_C
int RunningOnValgrind();
// ------------------------------------------------------------------------- double ValgrindSlowdown();
// Return non-zero value if running under valgrind.
//
// If "valgrind.h" is included into dynamic_annotations.cc,
// the regular valgrind mechanism will be used.
// See http://valgrind.org/docs/manual/manual-core-adv.html about
// RUNNING_ON_VALGRIND and other valgrind "client requests".
// The file "valgrind.h" may be obtained by doing
// svn co svn://svn.valgrind.org/valgrind/trunk/include
//
// If for some reason you can't use "valgrind.h" or want to fake valgrind,
// there are two ways to make this function return non-zero:
// - Use environment variable: export RUNNING_ON_VALGRIND=1
// - Make your tool intercept the function RunningOnValgrind() and
// change its return value.
//
int RunningOnValgrind(void);
// ValgrindSlowdown returns:
// * 1.0, if (RunningOnValgrind() == 0)
// * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") ==
// NULL)
// * atof(getenv("VALGRIND_SLOWDOWN")) otherwise
// This function can be used to scale timeout values:
// EXAMPLE:
// for (;;) {
// DoExpensiveBackgroundTask();
// SleepForSeconds(5 * ValgrindSlowdown());
// }
//
double ValgrindSlowdown(void);
ABSL_INTERNAL_END_EXTERN_C ABSL_INTERNAL_END_EXTERN_C
#else
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
ABSL_DEPRECATED(
"Don't use this interface. It is misleading and is being deleted.")
ABSL_ATTRIBUTE_ALWAYS_INLINE inline int RunningOnValgrind() { return 0; }
ABSL_DEPRECATED(
"Don't use this interface. It is misleading and is being deleted.")
ABSL_ATTRIBUTE_ALWAYS_INLINE inline double ValgrindSlowdown() { return 1.0; }
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
using absl::base_internal::RunningOnValgrind;
using absl::base_internal::ValgrindSlowdown;
#endif
#endif
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Address sanitizer annotations // Address sanitizer annotations
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
// Describe the current state of a contiguous container such as e.g. // Describe the current state of a contiguous container such as e.g.
// std::vector or std::string. For more details see // std::vector or std::string. For more details see
// sanitizer/common_interface_defs.h, which is provided by the compiler. // sanitizer/common_interface_defs.h, which is provided by the compiler.
@ -483,16 +462,15 @@ ABSL_INTERNAL_END_EXTERN_C
#else #else
#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) #define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty
#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") #define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
#endif // ADDRESS_SANITIZER #endif // ABSL_HAVE_ADDRESS_SANITIZER
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Undefine the macros intended only for this file. // Undefine the macros intended only for this file.
#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED #undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
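The contiguous-container annotation above is only defined here; as a hedged illustration (not part of the diff), a vector-like type would call it whenever its live size changes. The SmallBuffer type and the include path are assumptions for the sketch.

#include "absl/base/dynamic_annotations.h"

// Hypothetical fixed-capacity buffer; only the annotation call matters.
struct SmallBuffer {
  char data[64];
  char* end;  // one past the last live element
};

void SetSize(SmallBuffer* b, int new_size) {
  char* const new_end = b->data + new_size;
  // Tell ASan which part of [data, data + 64) is now considered live.
  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(b->data, b->data + 64, b->end, new_end);
  b->end = new_end;
}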


@ -83,10 +83,11 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
#elif defined(_MSC_VER) && !defined(__clang__) #elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int) unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { if ((n >> 32) &&
_BitScanReverse(&result, static_cast<unsigned long>(n >> 32))) {
return 31 - result; return 31 - result;
} }
if (_BitScanReverse(&result, n)) { if (_BitScanReverse(&result, static_cast<unsigned long>(n))) {
return 63 - result; return 63 - result;
} }
return 64; return 64;
@ -170,10 +171,10 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
#elif defined(_MSC_VER) && !defined(__clang__) #elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int) unsigned long result = 0; // NOLINT(runtime/int)
if (static_cast<uint32_t>(n) == 0) { if (static_cast<uint32_t>(n) == 0) {
_BitScanForward(&result, n >> 32); _BitScanForward(&result, static_cast<unsigned long>(n >> 32));
return result + 32; return result + 32;
} }
_BitScanForward(&result, n); _BitScanForward(&result, static_cast<unsigned long>(n));
return result; return result;
#elif defined(__GNUC__) || defined(__clang__) #elif defined(__GNUC__) || defined(__clang__)
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
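For readers without the MSVC intrinsics, a small portable sketch (not part of the diff) of the same idea the comment above describes, composing a 64-bit count from two 32-bit counts; the helper names are hypothetical.

#include <cstdint>

int CountLeadingZeros32Portable(uint32_t x) {
  int n = 32;
  while (x != 0) {
    x >>= 1;
    --n;
  }
  return n;
}

int CountLeadingZeros64Portable(uint64_t x) {
  const uint32_t hi = static_cast<uint32_t>(x >> 32);
  // If any high bit is set, only the high word matters; otherwise the result
  // is 32 plus the count for the low word.
  return hi != 0 ? CountLeadingZeros32Portable(hi)
                 : 32 + CountLeadingZeros32Portable(static_cast<uint32_t>(x));
}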


@ -58,8 +58,6 @@
#if defined(__clang__) && !defined(SWIG) #if defined(__clang__) && !defined(SWIG)
#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 #define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
#else
#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 0
#endif #endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 #if DYNAMIC_ANNOTATIONS_ENABLED != 0
@ -84,19 +82,16 @@
// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 // ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. // Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
ABSL_INTERNAL_ANNOTALYSIS_ENABLED ABSL_INTERNAL_ANNOTALYSIS_ENABLED
#endif #endif
// Memory annotations are also made available to LLVM's Memory Sanitizer // Memory annotations are also made available to LLVM's Memory Sanitizer
#if defined(MEMORY_SANITIZER) && defined(__has_feature) && \ #if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
!defined(__native_client__)
#if __has_feature(memory_sanitizer)
#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
#endif #endif
#endif
#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED #ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
@ -162,7 +157,7 @@
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
// Report that a linker initialized lock has been created at address `lock`. // Report that a linker initialized lock has been created at address `lock`.
#ifdef THREAD_SANITIZER #ifdef ABSL_HAVE_THREAD_SANITIZER
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock) (__FILE__, __LINE__, lock)
@ -250,19 +245,19 @@
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Define IGNORE_READS_BEGIN/_END attributes. // Define IGNORE_READS_BEGIN/_END attributes.
#if ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED == 1 #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
__attribute((exclusive_lock_function("*"))) __attribute((exclusive_lock_function("*")))
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
__attribute((unlock_function("*"))) __attribute((unlock_function("*")))
#else // ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED == 0 #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
#endif // ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED #endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Define IGNORE_READS_BEGIN/_END annotations. // Define IGNORE_READS_BEGIN/_END annotations.
@ -367,7 +362,7 @@
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Address sanitizer annotations // Address sanitizer annotations
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
// Describe the current state of a contiguous container such as e.g. // Describe the current state of a contiguous container such as e.g.
// std::vector or std::string. For more details see // std::vector or std::string. For more details see
// sanitizer/common_interface_defs.h, which is provided by the compiler. // sanitizer/common_interface_defs.h, which is provided by the compiler.
@ -385,7 +380,7 @@
#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") #define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
#endif // ADDRESS_SANITIZER #endif // ABSL_HAVE_ADDRESS_SANITIZER
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Undefine the macros intended only for this file. // Undefine the macros intended only for this file.


@ -185,7 +185,7 @@ TEST(ExponentialBiasedTest, InitializationModes) {
ABSL_CONST_INIT static ExponentialBiased eb_static; ABSL_CONST_INIT static ExponentialBiased eb_static;
EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0)); EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));
#if ABSL_HAVE_THREAD_LOCAL #ifdef ABSL_HAVE_THREAD_LOCAL
thread_local ExponentialBiased eb_thread; thread_local ExponentialBiased eb_thread;
EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0)); EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
#endif #endif


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// //
// absl::base_internal::Invoke(f, args...) is an implementation of // absl::base_internal::invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard. // INVOKE(f, args...) from section [func.require] of the C++ standard.
// //
// [func.require] // [func.require]
@ -29,7 +29,7 @@
// is not one of the types described in the previous item; // is not one of the types described in the previous item;
// 5. f(t1, t2, ..., tN) in all other cases. // 5. f(t1, t2, ..., tN) in all other cases.
// //
// The implementation is SFINAE-friendly: substitution failure within Invoke() // The implementation is SFINAE-friendly: substitution failure within invoke()
// isn't an error. // isn't an error.
#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ #ifndef ABSL_BASE_INTERNAL_INVOKE_H_
@ -170,13 +170,13 @@ struct Invoker {
// The result type of Invoke<F, Args...>. // The result type of Invoke<F, Args...>.
template <typename F, typename... Args> template <typename F, typename... Args>
using InvokeT = decltype(Invoker<F, Args...>::type::Invoke( using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...)); std::declval<F>(), std::declval<Args>()...));
// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard. // [func.require] of the C++ standard.
template <typename F, typename... Args> template <typename F, typename... Args>
InvokeT<F, Args...> Invoke(F&& f, Args&&... args) { invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f), return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...); std::forward<Args>(args)...);
} }
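A short usage sketch (not part of the diff) of the renamed absl::base_internal::invoke(), covering the main INVOKE cases exercised by the tests later in this commit; Widget and Demo are hypothetical.

#include <memory>

#include "absl/base/internal/invoke.h"

struct Widget {
  int size() const { return 3; }
  int id;
};

int Demo() {
  Widget w{7};
  auto p = std::make_unique<Widget>(Widget{9});
  int a = absl::base_internal::invoke(&Widget::size, w);    // member function
  int b = absl::base_internal::invoke(&Widget::id, p);      // data member via smart pointer
  int c = absl::base_internal::invoke([](int x) { return x + 1; }, 41);  // plain callable
  return a + b + c;
}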


@ -598,7 +598,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
section.Leave(); section.Leave();
result = &s->levels; result = &s->levels;
} }
ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request); ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
return result; return result;
} }


@ -18,6 +18,7 @@
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ #ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ #define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h" #include "absl/base/macros.h"
@ -29,6 +30,13 @@ extern "C" void __google_enable_rescheduling(bool disable_result);
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
class CondVar;
class Mutex;
namespace synchronization_internal {
int MutexDelay(int32_t c, int mode);
} // namespace synchronization_internal
namespace base_internal { namespace base_internal {
class SchedulingHelper; // To allow use of SchedulingGuard. class SchedulingHelper; // To allow use of SchedulingGuard.
@ -53,6 +61,8 @@ class SchedulingGuard {
public: public:
// Returns true iff the calling thread may be cooperatively rescheduled. // Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed(); static bool ReschedulingIsAllowed();
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;
private: private:
// Disable cooperative rescheduling of the calling thread. It may still // Disable cooperative rescheduling of the calling thread. It may still
@ -76,12 +86,23 @@ class SchedulingGuard {
bool disabled; bool disabled;
}; };
// Access to SchedulingGuard is explicitly white-listed. // A scoped helper to enable rescheduling temporarily.
// REQUIRES: destructor must run in same thread as constructor.
class ScopedEnable {
public:
ScopedEnable();
~ScopedEnable();
private:
int scheduling_disabled_depth_;
};
// Access to SchedulingGuard is explicitly permitted.
friend class absl::CondVar;
friend class absl::Mutex;
friend class SchedulingHelper; friend class SchedulingHelper;
friend class SpinLock; friend class SpinLock;
friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;
}; };
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -100,6 +121,12 @@ inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
return; return;
} }
inline SchedulingGuard::ScopedEnable::ScopedEnable()
: scheduling_disabled_depth_(0) {}
inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
}
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


@ -69,7 +69,7 @@
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible. // TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a // Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
// whitelisted set of platforms for which we expect not to be able to raw log. // selected set of platforms for which we expect not to be able to raw log.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
absl::raw_logging_internal::LogPrefixHook> absl::raw_logging_internal::LogPrefixHook>
@ -227,7 +227,7 @@ bool RawLoggingFullySupported() {
#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED #endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
} }
ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction> absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog); internal_log_function(DefaultInternalLog);


@ -72,10 +72,14 @@
// //
// The API is a subset of the above: each macro only takes two arguments. Use // The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message. // StrCat if you need to build a richer message.
#define ABSL_INTERNAL_LOG(severity, message) \ #define ABSL_INTERNAL_LOG(severity, message) \
do { \ do { \
::absl::raw_logging_internal::internal_log_function( \ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \ ::absl::raw_logging_internal::internal_log_function( \
ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_filename, __LINE__, message); \
if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
ABSL_INTERNAL_UNREACHABLE; \
} while (0) } while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \ #define ABSL_INTERNAL_CHECK(condition, message) \
@ -170,7 +174,7 @@ using InternalLogFunction = void (*)(absl::LogSeverity severity,
const char* file, int line, const char* file, int line,
const std::string& message); const std::string& message);
ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook< ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction> InternalLogFunction>
internal_log_function; internal_log_function;
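Illustrative only: the public raw-logging macros from this same header (ABSL_RAW_LOG and ABSL_RAW_CHECK) follow the pattern of the internal variants above and are the ones most callers reach for; MustOpen is a hypothetical caller.

#include <cerrno>

#include "absl/base/internal/raw_logging.h"

void MustOpen(bool opened) {
  if (!opened) {
    ABSL_RAW_LOG(ERROR, "open failed, errno=%d", errno);
  }
  // Logs the message and aborts if the condition is false.
  ABSL_RAW_CHECK(opened, "caller requires an opened handle");
}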


@ -64,7 +64,14 @@ class ABSL_LOCKABLE SpinLock {
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
// For global SpinLock instances prefer trivial destructor when possible.
// Default but non-trivial destructor in some build configurations causes an
// extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
~SpinLock() = default;
#endif
// Acquire this SpinLock. // Acquire this SpinLock.
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
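The destructor change above is about globals like the following; a minimal sketch (internal API, shown only to ground the comment), assuming the usual const-init pattern for file-scope SpinLocks.

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/spinlock.h"

ABSL_CONST_INIT static absl::base_internal::SpinLock g_counter_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
static int g_counter = 0;

void Increment() {
  absl::base_internal::SpinLockHolder h(&g_counter_lock);
  ++g_counter;
}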


@ -14,6 +14,7 @@
#include "absl/base/internal/strerror.h" #include "absl/base/internal/strerror.h"
#include <array>
#include <cerrno> #include <cerrno>
#include <cstddef> #include <cstddef>
#include <cstdio> #include <cstdio>
@ -21,13 +22,13 @@
#include <string> #include <string>
#include <type_traits> #include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/internal/errno_saver.h" #include "absl/base/internal/errno_saver.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace base_internal { namespace base_internal {
namespace { namespace {
const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
#if defined(_WIN32) #if defined(_WIN32)
int rc = strerror_s(buf, buflen, errnum); int rc = strerror_s(buf, buflen, errnum);
@ -35,15 +36,6 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0'; if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
return buf; return buf;
#else #else
#if defined(__GLIBC__) || defined(__APPLE__)
// Use the BSD sys_errlist API provided by GNU glibc and others to
// avoid any need to copy the message into the local buffer first.
if (0 <= errnum && errnum < sys_nerr) {
if (const char* p = sys_errlist[errnum]) {
return p;
}
}
#endif
// The type of `ret` is platform-specific; both of these branches must compile // The type of `ret` is platform-specific; both of these branches must compile
// either way but only one will execute on any given platform: // either way but only one will execute on any given platform:
auto ret = strerror_r(errnum, buf, buflen); auto ret = strerror_r(errnum, buf, buflen);
@ -57,9 +49,8 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
} }
#endif #endif
} }
} // namespace
std::string StrError(int errnum) { std::string StrErrorInternal(int errnum) {
absl::base_internal::ErrnoSaver errno_saver; absl::base_internal::ErrnoSaver errno_saver;
char buf[100]; char buf[100];
const char* str = StrErrorAdaptor(errnum, buf, sizeof buf); const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
@ -70,6 +61,28 @@ std::string StrError(int errnum) {
return str; return str;
} }
// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
// to `StrErrorAdaptor()` if the value is larger than this.
constexpr int kSysNerr = 135;
std::array<std::string, kSysNerr>* NewStrErrorTable() {
auto* table = new std::array<std::string, kSysNerr>;
for (int i = 0; i < static_cast<int>(table->size()); ++i) {
(*table)[i] = StrErrorInternal(i);
}
return table;
}
} // namespace
std::string StrError(int errnum) {
static const auto* table = NewStrErrorTable();
if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
return (*table)[errnum];
}
return StrErrorInternal(errnum);
}
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
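A hedged usage sketch (not part of the diff): common errno values hit the precomputed table added above, while anything at or beyond kSysNerr falls back to StrErrorInternal(); DescribeRangeError is hypothetical.

#include <cerrno>
#include <string>

#include "absl/base/internal/strerror.h"

std::string DescribeRangeError() {
  return absl::base_internal::StrError(ERANGE);  // e.g. "Numerical result out of range"
}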


@ -20,15 +20,6 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace { namespace {
#if defined(__GLIBC__) || defined(__APPLE__)
void BM_SysErrList(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(std::string(sys_errlist[ERANGE]));
}
}
BENCHMARK(BM_SysErrList);
#endif
void BM_AbslStrError(benchmark::State& state) { void BM_AbslStrError(benchmark::State& state) {
for (auto _ : state) { for (auto _ : state) {
benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE)); benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));


@ -39,6 +39,7 @@
#endif #endif
#include <string.h> #include <string.h>
#include <cassert> #include <cassert>
#include <cstdint> #include <cstdint>
#include <cstdio> #include <cstdio>
@ -50,6 +51,7 @@
#include <vector> #include <vector>
#include "absl/base/call_once.h" #include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h" #include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h" #include "absl/base/internal/unscaledcycleclock.h"
@ -420,6 +422,18 @@ pid_t GetTID() {
#endif #endif
// GetCachedTID() caches the thread ID in thread-local storage (which is a
// userspace construct) to avoid unnecessary system calls. Without this caching,
// it can take roughly 98ns, while it takes roughly 1ns with this caching.
pid_t GetCachedTID() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local pid_t thread_id = GetTID();
return thread_id;
#else
return GetTID();
#endif // ABSL_HAVE_THREAD_LOCAL
}
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


@ -30,6 +30,7 @@
#include <cstdint> #include <cstdint>
#include "absl/base/config.h"
#include "absl/base/port.h" #include "absl/base/port.h"
namespace absl { namespace absl {
@ -59,6 +60,13 @@ using pid_t = uint32_t;
#endif #endif
pid_t GetTID(); pid_t GetTID();
// Like GetTID(), but caches the result in thread-local storage in order
// to avoid unnecessary system calls. Note that there are some cases where
// one must call through to GetTID directly, which is why this exists as a
// separate function. For example, GetCachedTID() is not safe to call in
// an asynchronous signal-handling context nor right after a call to fork().
pid_t GetCachedTID();
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
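Illustrative only, following the header comment above: GetCachedTID() for hot paths, GetTID() where the cached value may be stale or unsafe to compute (async signal handlers, right after fork()). Both callers here are hypothetical.

#include "absl/base/internal/sysinfo.h"

void TagWithThreadId(long* out) {
  *out = static_cast<long>(absl::base_internal::GetCachedTID());
}

extern "C" void OnSignal(int /*signo*/) {
  // Async-signal context: call GetTID() directly, per the comment above.
  (void)absl::base_internal::GetTID();
}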


@ -23,6 +23,7 @@
#include <cassert> #include <cassert>
#include <memory> #include <memory>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h" #include "absl/base/call_once.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h" #include "absl/base/internal/spinlock.h"
@ -53,9 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// exist within a process (via dlopen() or similar), references to // exist within a process (via dlopen() or similar), references to
// thread_identity_ptr from each instance of the code will refer to // thread_identity_ptr from each instance of the code will refer to
// *different* instances of this ptr. // *different* instances of this ptr.
#ifdef __GNUC__ // Apple platforms have the visibility attribute, but issue a compile warning
// that protected visibility is unsupported.
#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected"))) __attribute__((visibility("protected")))
#endif // __GNUC__ #endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS #if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster. // Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;


@ -32,6 +32,7 @@
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h" #include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
@ -69,30 +70,28 @@ struct PerThreadSynch {
// is using this PerThreadSynch as a terminator. Its // is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop // skip field must not be filled in because the loop
// might then skip over the terminator. // might then skip over the terminator.
bool wake; // This thread is to be woken from a Mutex.
// The wait parameters of the current wait. waitp is null if the // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
// thread is not waiting. Transitions from null to non-null must // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
// occur before the enqueue commit point (state = kQueued in //
// Enqueue() and CondVarEnqueue()). Transitions from non-null to // The value of "x->cond_waiter" is meaningless if "x" is not on a
// null must occur after the wait is finished (state = kAvailable in // Mutex waiter list.
// Mutex::Block() and CondVar::WaitCommon()). This field may be bool cond_waiter;
// changed only by the thread that describes this PerThreadSynch. A bool maybe_unlocking; // Valid at head of Mutex waiter queue;
// special case is Fer(), which calls Enqueue() on another thread, // true if UnlockSlow could be searching
// but with an identical SynchWaitParams pointer, thus leaving the // for a waiter to wake. Used for an optimization
// pointer unchanged. // in Enqueue(). true is always a valid value.
SynchWaitParams *waitp; // Can be reset to false when the unlocker or any
// writer releases the lock, or a reader fully
bool suppress_fatal_errors; // If true, try to proceed even in the face of // releases the lock. It may not be set to false
// broken invariants. This is used within fatal // by a reader that decrements the count to
// signal handlers to improve the chances of // non-zero. protected by mutex spinlock
// debug logging information being output bool suppress_fatal_errors; // If true, try to proceed even in the face
// successfully. // of broken invariants. This is used within
// fatal signal handlers to improve the
intptr_t readers; // Number of readers in mutex. // chances of debug logging information being
int priority; // Priority of thread (updated every so often). // output successfully.
int priority; // Priority of thread (updated every so often).
// When priority will next be read (cycles).
int64_t next_priority_read_cycles;
// State values: // State values:
// kAvailable: This PerThreadSynch is available. // kAvailable: This PerThreadSynch is available.
@ -111,30 +110,30 @@ struct PerThreadSynch {
}; };
std::atomic<State> state; std::atomic<State> state;
bool maybe_unlocking; // Valid at head of Mutex waiter queue; // The wait parameters of the current wait. waitp is null if the
// true if UnlockSlow could be searching // thread is not waiting. Transitions from null to non-null must
// for a waiter to wake. Used for an optimization // occur before the enqueue commit point (state = kQueued in
// in Enqueue(). true is always a valid value. // Enqueue() and CondVarEnqueue()). Transitions from non-null to
// Can be reset to false when the unlocker or any // null must occur after the wait is finished (state = kAvailable in
// writer releases the lock, or a reader fully releases // Mutex::Block() and CondVar::WaitCommon()). This field may be
// the lock. It may not be set to false by a reader // changed only by the thread that describes this PerThreadSynch. A
// that decrements the count to non-zero. // special case is Fer(), which calls Enqueue() on another thread,
// protected by mutex spinlock // but with an identical SynchWaitParams pointer, thus leaving the
// pointer unchanged.
SynchWaitParams* waitp;
bool wake; // This thread is to be woken from a Mutex. intptr_t readers; // Number of readers in mutex.
// If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the // When priority will next be read (cycles).
// waiter is waiting on the mutex as part of a CV Wait or Mutex Await. int64_t next_priority_read_cycles;
//
// The value of "x->cond_waiter" is meaningless if "x" is not on a
// Mutex waiter list.
bool cond_waiter;
// Locks held; used during deadlock detection. // Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks; SynchLocksHeld *all_locks;
}; };
// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity { struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that // Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is // the PerThreadSynch object associated with each thread is
@ -212,7 +211,9 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__) #elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ #elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L) (__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not // Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc. // present in the upstream eglibc.


@ -75,7 +75,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
// - If a thread implementation chooses to recycle threads, that // - If a thread implementation chooses to recycle threads, that
// correct re-initialization occurs. // correct re-initialization occurs.
static const int kNumLoops = 3; static const int kNumLoops = 3;
static const int kNumThreads = 400; static const int kNumThreads = 32;
for (int iter = 0; iter < kNumLoops; iter++) { for (int iter = 0; iter < kNumLoops; iter++) {
std::vector<std::thread> threads; std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) { for (int i = 0; i < kNumThreads; ++i) {


@ -18,6 +18,7 @@
#include <functional> #include <functional>
#include <new> #include <new>
#include <stdexcept> #include <stdexcept>
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
@ -25,83 +26,186 @@ namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace base_internal { namespace base_internal {
// NOTE: The various STL exception throwing functions are placed within the
// #ifdef blocks so the symbols aren't exposed on platforms that don't support
// them, such as the Android NDK. For example, ANGLE fails to link when building
// within AOSP without them, since the STL functions don't exist.
namespace { namespace {
#ifdef ABSL_HAVE_EXCEPTIONS
template <typename T> template <typename T>
[[noreturn]] void Throw(const T& error) { [[noreturn]] void Throw(const T& error) {
#ifdef ABSL_HAVE_EXCEPTIONS
throw error; throw error;
#else
ABSL_RAW_LOG(FATAL, "%s", error.what());
std::abort();
#endif
} }
#endif
} // namespace } // namespace
void ThrowStdLogicError(const std::string& what_arg) { void ThrowStdLogicError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg)); Throw(std::logic_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdLogicError(const char* what_arg) { void ThrowStdLogicError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg)); Throw(std::logic_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdInvalidArgument(const std::string& what_arg) { void ThrowStdInvalidArgument(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg)); Throw(std::invalid_argument(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdInvalidArgument(const char* what_arg) { void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg)); Throw(std::invalid_argument(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdDomainError(const std::string& what_arg) { void ThrowStdDomainError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg)); Throw(std::domain_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdDomainError(const char* what_arg) { void ThrowStdDomainError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg)); Throw(std::domain_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdLengthError(const std::string& what_arg) { void ThrowStdLengthError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg)); Throw(std::length_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdLengthError(const char* what_arg) { void ThrowStdLengthError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg)); Throw(std::length_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdOutOfRange(const std::string& what_arg) { void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg)); Throw(std::out_of_range(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdOutOfRange(const char* what_arg) { void ThrowStdOutOfRange(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg)); Throw(std::out_of_range(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdRuntimeError(const std::string& what_arg) { void ThrowStdRuntimeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg)); Throw(std::runtime_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdRuntimeError(const char* what_arg) { void ThrowStdRuntimeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg)); Throw(std::runtime_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdRangeError(const std::string& what_arg) { void ThrowStdRangeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg)); Throw(std::range_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdRangeError(const char* what_arg) { void ThrowStdRangeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg)); Throw(std::range_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdOverflowError(const std::string& what_arg) { void ThrowStdOverflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg)); Throw(std::overflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdOverflowError(const char* what_arg) { void ThrowStdOverflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg)); Throw(std::overflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdUnderflowError(const std::string& what_arg) { void ThrowStdUnderflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg)); Throw(std::underflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
#endif
} }
void ThrowStdUnderflowError(const char* what_arg) { void ThrowStdUnderflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg)); Throw(std::underflow_error(what_arg));
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
#endif
} }
void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_function_call());
#else
std::abort();
#endif
}
void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::bad_alloc());
#else
std::abort();
#endif
}
} // namespace base_internal } // namespace base_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
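A minimal sketch (not part of the diff) of why these delegates exist: callers route failures through them so that exception-enabled builds throw while -fno-exceptions builds take the ABSL_RAW_LOG(FATAL) plus abort path added above. BoundedAt is hypothetical.

#include "absl/base/internal/throw_delegate.h"

int BoundedAt(const int* data, int size, int i) {
  if (i < 0 || i >= size) {
    absl::base_internal::ThrowStdOutOfRange("BoundedAt: index out of range");
  }
  return data[i];
}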


@ -19,6 +19,8 @@
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ #ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ #define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#include "absl/base/config.h"
// ABSL_INTERNAL_HAVE_TSAN_INTERFACE // ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Macro intended only for internal use. // Macro intended only for internal use.
// //
@ -28,7 +30,7 @@
#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." #error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
#endif #endif
#if defined(THREAD_SANITIZER) && defined(__has_include) #if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>) #if __has_include(<sanitizer/tsan_interface.h>)
#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 #define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
#endif #endif


@ -31,80 +31,6 @@
// The unaligned API is C++ only. The declarations use C++ features // The unaligned API is C++ only. The declarations use C++ features
// (namespaces, inline) which are absent or incompatible in C. // (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus) #if defined(__cplusplus)
#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
defined(MEMORY_SANITIZER)
// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
// will miss a bug if 08 is the first unaddressable byte.
// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
// miss a race between this access and some other accesses to 08.
// MemorySanitizer will correctly propagate the shadow on unaligned stores
// and correctly report bugs on unaligned loads, but it may not properly
// update and report the origin of the uninitialized memory.
// For all three tools, replacing an unaligned access with a tool-specific
// callback solves the problem.
// Make sure uint16_t/uint32_t/uint64_t are defined.
#include <stdint.h>
extern "C" {
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
} // extern "C"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
inline uint16_t UnalignedLoad16(const void *p) {
return __sanitizer_unaligned_load16(p);
}
inline uint32_t UnalignedLoad32(const void *p) {
return __sanitizer_unaligned_load32(p);
}
inline uint64_t UnalignedLoad64(const void *p) {
return __sanitizer_unaligned_load64(p);
}
inline void UnalignedStore16(void *p, uint16_t v) {
__sanitizer_unaligned_store16(p, v);
}
inline void UnalignedStore32(void *p, uint32_t v) {
__sanitizer_unaligned_store32(p, v);
}
inline void UnalignedStore64(void *p, uint64_t v) {
__sanitizer_unaligned_store64(p, v);
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
(absl::base_internal::UnalignedLoad16(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
(absl::base_internal::UnalignedLoad32(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
(absl::base_internal::UnalignedLoad64(_p))
#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
(absl::base_internal::UnalignedStore16(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
(absl::base_internal::UnalignedStore32(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
#else
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace base_internal { namespace base_internal {
@ -151,8 +77,6 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val)) (absl::base_internal::UnalignedStore64(_p, _val))
#endif
#endif // defined(__cplusplus), end of unaligned API #endif // defined(__cplusplus), end of unaligned API
#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ #endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
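The deleted sanitizer shims above were only needed because raw unaligned loads confuse ASan/TSan/MSan; the branch this header keeps goes through memcpy instead, which (presumably the rationale for the deletion) the sanitizers already model correctly. A hedged sketch of that memcpy-based load, with a hypothetical name:

#include <cstdint>
#include <cstring>

inline uint32_t UnalignedLoad32Sketch(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);  // well-defined regardless of alignment
  return v;
}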


@ -123,9 +123,7 @@ double UnscaledCycleClock::Frequency() {
#pragma intrinsic(__rdtsc) #pragma intrinsic(__rdtsc)
int64_t UnscaledCycleClock::Now() { int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
return __rdtsc();
}
double UnscaledCycleClock::Frequency() { double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency(); return base_internal::NominalCPUFrequency();


@ -15,8 +15,8 @@
// UnscaledCycleClock // UnscaledCycleClock
// An UnscaledCycleClock yields the value and frequency of a cycle counter // An UnscaledCycleClock yields the value and frequency of a cycle counter
// that increments at a rate that is approximately constant. // that increments at a rate that is approximately constant.
// This class is for internal / whitelisted use only, you should consider // This class is for internal use only, you should consider using CycleClock
// using CycleClock instead. // instead.
// //
// Notes: // Notes:
// The cycle counter frequency is not necessarily the core clock frequency. // The cycle counter frequency is not necessarily the core clock frequency.
@ -109,7 +109,7 @@ class UnscaledCycleClock {
// value. // value.
static double Frequency(); static double Frequency();
// Whitelisted friends. // Allowed users
friend class base_internal::CycleClock; friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;


@ -86,71 +86,73 @@ struct FlipFlop {
int member; int member;
}; };
// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending // CallMaybeWithArg(f) resolves either to invoke(f) or invoke(f, 42), depending
// on which one is valid. // on which one is valid.
template <typename F> template <typename F>
decltype(Invoke(std::declval<const F&>())) CallMaybeWithArg(const F& f) { decltype(base_internal::invoke(std::declval<const F&>())) CallMaybeWithArg(
return Invoke(f); const F& f) {
return base_internal::invoke(f);
} }
template <typename F> template <typename F>
decltype(Invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(const F& f) { decltype(base_internal::invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(
return Invoke(f, 42); const F& f) {
return base_internal::invoke(f, 42);
} }
TEST(InvokeTest, Function) { TEST(InvokeTest, Function) {
EXPECT_EQ(1, Invoke(Function, 3, 2)); EXPECT_EQ(1, base_internal::invoke(Function, 3, 2));
EXPECT_EQ(1, Invoke(&Function, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Function, 3, 2));
} }
TEST(InvokeTest, NonCopyableArgument) { TEST(InvokeTest, NonCopyableArgument) {
EXPECT_EQ(42, Invoke(Sink, make_unique<int>(42))); EXPECT_EQ(42, base_internal::invoke(Sink, make_unique<int>(42)));
} }
TEST(InvokeTest, NonCopyableResult) { TEST(InvokeTest, NonCopyableResult) {
EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42)); EXPECT_THAT(base_internal::invoke(Factory, 42), ::testing::Pointee(42));
} }
TEST(InvokeTest, VoidResult) { TEST(InvokeTest, VoidResult) { base_internal::invoke(NoOp); }
Invoke(NoOp);
}
TEST(InvokeTest, ConstFunctor) { TEST(InvokeTest, ConstFunctor) {
EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(ConstFunctor(), 3, 2));
} }
TEST(InvokeTest, MutableFunctor) { TEST(InvokeTest, MutableFunctor) {
MutableFunctor f; MutableFunctor f;
EXPECT_EQ(1, Invoke(f, 3, 2)); EXPECT_EQ(1, base_internal::invoke(f, 3, 2));
EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(MutableFunctor(), 3, 2));
} }
TEST(InvokeTest, EphemeralFunctor) { TEST(InvokeTest, EphemeralFunctor) {
EphemeralFunctor f; EphemeralFunctor f;
EXPECT_EQ(1, Invoke(std::move(f), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::move(f), 3, 2));
EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(EphemeralFunctor(), 3, 2));
} }
TEST(InvokeTest, OverloadedFunctor) { TEST(InvokeTest, OverloadedFunctor) {
OverloadedFunctor f; OverloadedFunctor f;
const OverloadedFunctor& cf = f; const OverloadedFunctor& cf = f;
EXPECT_EQ("&", Invoke(f)); EXPECT_EQ("&", base_internal::invoke(f));
EXPECT_EQ("& 42", Invoke(f, " 42")); EXPECT_EQ("& 42", base_internal::invoke(f, " 42"));
EXPECT_EQ("const&", Invoke(cf)); EXPECT_EQ("const&", base_internal::invoke(cf));
EXPECT_EQ("const& 42", Invoke(cf, " 42")); EXPECT_EQ("const& 42", base_internal::invoke(cf, " 42"));
EXPECT_EQ("&&", Invoke(std::move(f))); EXPECT_EQ("&&", base_internal::invoke(std::move(f)));
EXPECT_EQ("&& 42", Invoke(std::move(f), " 42"));
OverloadedFunctor f2;
EXPECT_EQ("&& 42", base_internal::invoke(std::move(f2), " 42"));
} }
TEST(InvokeTest, ReferenceWrapper) { TEST(InvokeTest, ReferenceWrapper) {
ConstFunctor cf; ConstFunctor cf;
MutableFunctor mf; MutableFunctor mf;
EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::cref(cf), 3, 2));
EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::ref(cf), 3, 2));
EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::ref(mf), 3, 2));
} }
TEST(InvokeTest, MemberFunction) { TEST(InvokeTest, MemberFunction) {
@ -158,58 +160,62 @@ TEST(InvokeTest, MemberFunction) {
std::unique_ptr<const Class> cp(new Class); std::unique_ptr<const Class> cp(new Class);
std::unique_ptr<volatile Class> vp(new Class); std::unique_ptr<volatile Class> vp(new Class);
EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefRefMethod, std::move(*p), 3, 2)); // NOLINT EXPECT_EQ(1, base_internal::invoke(&Class::RefRefMethod, std::move(*p), 3,
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p, 3, 2)); 2)); // NOLINT
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p.get(), 3, 2));
EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *cp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *vp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *vp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, make_unique<Class>(), 3, 2)); EXPECT_EQ(1,
EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<Class>(), 3, 2)); base_internal::invoke(&Class::Method, make_unique<Class>(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<const Class>(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique<Class>(),
3, 2));
EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod,
make_unique<const Class>(), 3, 2));
} }
TEST(InvokeTest, DataMember) { TEST(InvokeTest, DataMember) {
std::unique_ptr<Class> p(new Class{42}); std::unique_ptr<Class> p(new Class{42});
std::unique_ptr<const Class> cp(new Class{42}); std::unique_ptr<const Class> cp(new Class{42});
EXPECT_EQ(42, Invoke(&Class::member, p)); EXPECT_EQ(42, base_internal::invoke(&Class::member, p));
EXPECT_EQ(42, Invoke(&Class::member, *p)); EXPECT_EQ(42, base_internal::invoke(&Class::member, *p));
EXPECT_EQ(42, Invoke(&Class::member, p.get())); EXPECT_EQ(42, base_internal::invoke(&Class::member, p.get()));
Invoke(&Class::member, p) = 42; base_internal::invoke(&Class::member, p) = 42;
Invoke(&Class::member, p.get()) = 42; base_internal::invoke(&Class::member, p.get()) = 42;
EXPECT_EQ(42, Invoke(&Class::member, cp)); EXPECT_EQ(42, base_internal::invoke(&Class::member, cp));
EXPECT_EQ(42, Invoke(&Class::member, *cp)); EXPECT_EQ(42, base_internal::invoke(&Class::member, *cp));
EXPECT_EQ(42, Invoke(&Class::member, cp.get())); EXPECT_EQ(42, base_internal::invoke(&Class::member, cp.get()));
} }
TEST(InvokeTest, FlipFlop) { TEST(InvokeTest, FlipFlop) {
FlipFlop obj = {42}; FlipFlop obj = {42};
// This call could resolve to (obj.*&FlipFlop::ConstMethod)() or // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or
// ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former. // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former.
EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj)); EXPECT_EQ(42, base_internal::invoke(&FlipFlop::ConstMethod, obj));
EXPECT_EQ(42, Invoke(&FlipFlop::member, obj)); EXPECT_EQ(42, base_internal::invoke(&FlipFlop::member, obj));
} }
TEST(InvokeTest, SfinaeFriendly) { TEST(InvokeTest, SfinaeFriendly) {
@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #ifndef ABSL_BASE_LOG_SEVERITY_H_
#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #define ABSL_BASE_LOG_SEVERITY_H_
#include <array> #include <array>
#include <ostream> #include <ostream>
@ -118,4 +118,4 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ #endif // ABSL_BASE_LOG_SEVERITY_H_
@ -55,85 +55,6 @@ auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
// ABSL_FALLTHROUGH_INTENDED
//
// Annotates implicit fall-through between switch labels, allowing a case to
// indicate intentional fallthrough and turn off warnings about any lack of a
// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
// a semicolon and can be used in most places where `break` can, provided that
// no statements exist between it and the next switch label.
//
// Example:
//
// switch (x) {
// case 40:
// case 41:
// if (truth_is_out_there) {
// ++x;
// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations
// // in comments
// } else {
// return x;
// }
// case 42:
// ...
//
// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed
// when performing switch labels fall-through diagnostic
// (`-Wimplicit-fallthrough`). See clang documentation on language extensions
// for details:
// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
//
// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
// has no effect on diagnostics. In any case this macro has no effect on runtime
// behavior and performance of code.
#ifdef ABSL_FALLTHROUGH_INTENDED
#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
#endif
// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
#if defined(__clang__) && defined(__has_warning)
#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
#endif
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
#endif
#ifndef ABSL_FALLTHROUGH_INTENDED
#define ABSL_FALLTHROUGH_INTENDED \
do { \
} while (0)
#endif
// ABSL_DEPRECATED()
//
// Marks a deprecated class, struct, enum, function, method and variable
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
// Examples:
//
// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
//
// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
//
// template <typename T>
// ABSL_DEPRECATED("Use DoThat() instead")
// void DoThis();
//
// Every usage of a deprecated entity will trigger a warning when compiled with
// clang's `-Wdeprecated-declarations` option. This option is turned off by
// default, but the warnings will be reported by clang-tidy.
#if defined(__clang__) && __cplusplus >= 201103L
#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
#endif
#ifndef ABSL_DEPRECATED
#define ABSL_DEPRECATED(message)
#endif
// ABSL_BAD_CALL_IF() // ABSL_BAD_CALL_IF()
// //
// Used on a function overload to trap bad calls: any call that matches the // Used on a function overload to trap bad calls: any call that matches the
@ -223,4 +144,15 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_RETHROW do {} while (false) #define ABSL_INTERNAL_RETHROW do {} while (false)
#endif // ABSL_HAVE_EXCEPTIONS #endif // ABSL_HAVE_EXCEPTIONS
// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
// reaches one has undefined behavior, and the compiler may optimize
// accordingly.
#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define ABSL_INTERNAL_UNREACHABLE __assume(0)
#else
#define ABSL_INTERNAL_UNREACHABLE
#endif
#endif // ABSL_BASE_MACROS_H_ #endif // ABSL_BASE_MACROS_H_
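For illustration (the macro is Abseil-internal, so this is only a sketch of its intent), a fully covered switch where falling off the end is impossible; `Color` and `ColorName` are hypothetical names used only for this example:

enum class Color { kRed, kGreen };

const char* ColorName(Color c) {
  switch (c) {
    case Color::kRed:
      return "red";
    case Color::kGreen:
      return "green";
  }
  // Every enumerator is handled above, so control cannot reach this point;
  // the macro communicates that to the optimizer so it can drop the dead path.
  ABSL_INTERNAL_UNREACHABLE;
}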
@ -171,7 +171,7 @@
// to yield performance improvements. // to yield performance improvements.
#if ABSL_HAVE_BUILTIN(__builtin_expect) || \ #if ABSL_HAVE_BUILTIN(__builtin_expect) || \
(defined(__GNUC__) && !defined(__clang__)) (defined(__GNUC__) && !defined(__clang__))
#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0)) #define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false))
#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) #define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
#else #else
#define ABSL_PREDICT_FALSE(x) (x) #define ABSL_PREDICT_FALSE(x) (x)
@ -179,7 +179,7 @@
#endif #endif
// ABSL_INTERNAL_ASSUME(cond) // ABSL_INTERNAL_ASSUME(cond)
// Informs the compiler than a condition is always true and that it can assume // Informs the compiler that a condition is always true and that it can assume
// it to be true for optimization purposes. The call has undefined behavior if // it to be true for optimization purposes. The call has undefined behavior if
// the condition is false. // the condition is false.
// In !NDEBUG mode, the condition is checked with an assert(). // In !NDEBUG mode, the condition is checked with an assert().
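The `false || (x)` form above matters because `__builtin_expect` takes an integral argument; the wrapper forces a contextual conversion to bool first, so pointers, `absl::optional`, and types with an explicit `operator bool` can now be passed directly. A hedged sketch (`Use` is a hypothetical name, assuming absl/types/optional.h):

void Use(const absl::optional<int>& maybe) {
  // absl::optional converts to bool only contextually (explicit operator bool);
  // `false || (x)` supplies that context, so this now compiles, as the
  // re-enabled predict_test cases below verify.
  if (ABSL_PREDICT_FALSE(maybe)) {
    // handle the rarely-populated case
  }
}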
@ -74,9 +74,8 @@ TEST(PredictTest, Pointer) {
const int *null_intptr = nullptr; const int *null_intptr = nullptr;
EXPECT_TRUE(ABSL_PREDICT_TRUE(good_intptr)); EXPECT_TRUE(ABSL_PREDICT_TRUE(good_intptr));
EXPECT_FALSE(ABSL_PREDICT_TRUE(null_intptr)); EXPECT_FALSE(ABSL_PREDICT_TRUE(null_intptr));
// The following doesn't compile: EXPECT_TRUE(ABSL_PREDICT_FALSE(good_intptr));
// EXPECT_TRUE(ABSL_PREDICT_FALSE(good_intptr)); EXPECT_FALSE(ABSL_PREDICT_FALSE(null_intptr));
// EXPECT_FALSE(ABSL_PREDICT_FALSE(null_intptr));
} }
TEST(PredictTest, Optional) { TEST(PredictTest, Optional) {
@ -85,9 +84,8 @@ TEST(PredictTest, Optional) {
absl::optional<bool> no_value; absl::optional<bool> no_value;
EXPECT_TRUE(ABSL_PREDICT_TRUE(has_value)); EXPECT_TRUE(ABSL_PREDICT_TRUE(has_value));
EXPECT_FALSE(ABSL_PREDICT_TRUE(no_value)); EXPECT_FALSE(ABSL_PREDICT_TRUE(no_value));
// The following doesn't compile: EXPECT_TRUE(ABSL_PREDICT_FALSE(has_value));
// EXPECT_TRUE(ABSL_PREDICT_FALSE(has_value)); EXPECT_FALSE(ABSL_PREDICT_FALSE(no_value));
// EXPECT_FALSE(ABSL_PREDICT_FALSE(no_value));
} }
class ImplictlyConvertibleToBool { class ImplictlyConvertibleToBool {
@ -124,9 +122,8 @@ TEST(PredictTest, ExplicitBoolConversion) {
const ExplictlyConvertibleToBool is_false(false); const ExplictlyConvertibleToBool is_false(false);
if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE(); if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE();
if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE(); if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE();
// The following doesn't compile: if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE();
// if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE(); if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE();
// if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE();
} }
} // namespace } // namespace
@ -41,7 +41,7 @@
#endif #endif
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Compiler Check // Toolchain Check
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// We support MSVC++ 14.0 update 2 and later. // We support MSVC++ 14.0 update 2 and later.
@ -20,10 +20,12 @@
#include <limits> #include <limits>
#include <random> #include <random>
#include <thread> // NOLINT(build/c++11) #include <thread> // NOLINT(build/c++11)
#include <type_traits>
#include <vector> #include <vector>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/base/attributes.h" #include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h" #include "absl/base/internal/spinlock.h"
@ -103,6 +105,10 @@ static void ThreadedTest(SpinLock* spinlock) {
} }
} }
#ifndef ABSL_HAVE_THREAD_SANITIZER
static_assert(std::is_trivially_destructible<SpinLock>(), "");
#endif
TEST(SpinLock, StackNonCooperativeDisablesScheduling) { TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
spinlock.Lock(); spinlock.Lock();
@ -34,16 +34,11 @@
#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ #ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
#define ABSL_BASE_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_THREAD_ANNOTATIONS_H_
#include "absl/base/attributes.h"
#include "absl/base/config.h" #include "absl/base/config.h"
// TODO(mbonadei): Remove after the backward compatibility period. // TODO(mbonadei): Remove after the backward compatibility period.
#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export #include "absl/base/internal/thread_annotations.h" // IWYU pragma: export
#if defined(__clang__)
#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
#else
#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
#endif
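The per-attribute `#if ABSL_HAVE_ATTRIBUTE(...)` blocks below replace the single `__clang__` gate removed above, so any compiler that implements a given annotation attribute gets it. `ABSL_HAVE_ATTRIBUTE` comes from absl/base/attributes.h and is, roughly, a thin wrapper over `__has_attribute` (see attributes.h for the authoritative definition):

#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif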
// ABSL_GUARDED_BY() // ABSL_GUARDED_BY()
// //
// Documents if a shared field or global variable needs to be protected by a // Documents if a shared field or global variable needs to be protected by a
@ -61,8 +56,11 @@
// int p1_ ABSL_GUARDED_BY(mu_); // int p1_ ABSL_GUARDED_BY(mu_);
// ... // ...
// }; // };
#define ABSL_GUARDED_BY(x) \ #if ABSL_HAVE_ATTRIBUTE(guarded_by)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) #define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x)))
#else
#define ABSL_GUARDED_BY(x)
#endif
// ABSL_PT_GUARDED_BY() // ABSL_PT_GUARDED_BY()
// //
@ -84,8 +82,11 @@
// // `q_`, guarded by `mu1_`, points to a shared memory location that is // // `q_`, guarded by `mu1_`, points to a shared memory location that is
// // guarded by `mu2_`: // // guarded by `mu2_`:
// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); // int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_);
#define ABSL_PT_GUARDED_BY(x) \ #if ABSL_HAVE_ATTRIBUTE(pt_guarded_by)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) #define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x)))
#else
#define ABSL_PT_GUARDED_BY(x)
#endif
// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() // ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE()
// //
@ -102,11 +103,17 @@
// //
// Mutex m1_; // Mutex m1_;
// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); // Mutex m2_ ABSL_ACQUIRED_AFTER(m1_);
#define ABSL_ACQUIRED_AFTER(...) \ #if ABSL_HAVE_ATTRIBUTE(acquired_after)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) #define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
#else
#define ABSL_ACQUIRED_AFTER(...)
#endif
#define ABSL_ACQUIRED_BEFORE(...) \ #if ABSL_HAVE_ATTRIBUTE(acquired_before)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) #define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))
#else
#define ABSL_ACQUIRED_BEFORE(...)
#endif
// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() // ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED()
// //
@ -131,33 +138,50 @@
// //
// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } // void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ #if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \
exclusive_locks_required(__VA_ARGS__)) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#else
#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...)
#endif
#if ABSL_HAVE_ATTRIBUTE(shared_locks_required)
#define ABSL_SHARED_LOCKS_REQUIRED(...) \ #define ABSL_SHARED_LOCKS_REQUIRED(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__)) __attribute__((shared_locks_required(__VA_ARGS__)))
#else
#define ABSL_SHARED_LOCKS_REQUIRED(...)
#endif
// ABSL_LOCKS_EXCLUDED() // ABSL_LOCKS_EXCLUDED()
// //
// Documents the locks acquired in the body of the function. These locks // Documents the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as Abseil's `Mutex` locks are // cannot be held when calling this function (as Abseil's `Mutex` locks are
// non-reentrant). // non-reentrant).
#define ABSL_LOCKS_EXCLUDED(...) \ #if ABSL_HAVE_ATTRIBUTE(locks_excluded)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) #define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#else
#define ABSL_LOCKS_EXCLUDED(...)
#endif
// ABSL_LOCK_RETURNED() // ABSL_LOCK_RETURNED()
// //
// Documents a function that returns a mutex without acquiring it. For example, // Documents a function that returns a mutex without acquiring it. For example,
// a public getter method that returns a pointer to a private mutex should // a public getter method that returns a pointer to a private mutex should
// be annotated with ABSL_LOCK_RETURNED. // be annotated with ABSL_LOCK_RETURNED.
#define ABSL_LOCK_RETURNED(x) \ #if ABSL_HAVE_ATTRIBUTE(lock_returned)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) #define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x)))
#else
#define ABSL_LOCK_RETURNED(x)
#endif
// ABSL_LOCKABLE // ABSL_LOCKABLE
// //
// Documents if a class/type is a lockable type (such as the `Mutex` class). // Documents if a class/type is a lockable type (such as the `Mutex` class).
#define ABSL_LOCKABLE ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable) #if ABSL_HAVE_ATTRIBUTE(lockable)
#define ABSL_LOCKABLE __attribute__((lockable))
#else
#define ABSL_LOCKABLE
#endif
// ABSL_SCOPED_LOCKABLE // ABSL_SCOPED_LOCKABLE
// //
@ -166,30 +190,43 @@
// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no // acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
// arguments; the analysis will assume that the destructor unlocks whatever the // arguments; the analysis will assume that the destructor unlocks whatever the
// constructor locked. // constructor locked.
#define ABSL_SCOPED_LOCKABLE \ #if ABSL_HAVE_ATTRIBUTE(scoped_lockable)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) #define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable))
#else
#define ABSL_SCOPED_LOCKABLE
#endif
// ABSL_EXCLUSIVE_LOCK_FUNCTION() // ABSL_EXCLUSIVE_LOCK_FUNCTION()
// //
// Documents functions that acquire a lock in the body of a function, and do // Documents functions that acquire a lock in the body of a function, and do
// not release it. // not release it.
#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ #if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \
exclusive_lock_function(__VA_ARGS__)) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#else
#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...)
#endif
// ABSL_SHARED_LOCK_FUNCTION() // ABSL_SHARED_LOCK_FUNCTION()
// //
// Documents functions that acquire a shared (reader) lock in the body of a // Documents functions that acquire a shared (reader) lock in the body of a
// function, and do not release it. // function, and do not release it.
#if ABSL_HAVE_ATTRIBUTE(shared_lock_function)
#define ABSL_SHARED_LOCK_FUNCTION(...) \ #define ABSL_SHARED_LOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__)) __attribute__((shared_lock_function(__VA_ARGS__)))
#else
#define ABSL_SHARED_LOCK_FUNCTION(...)
#endif
// ABSL_UNLOCK_FUNCTION() // ABSL_UNLOCK_FUNCTION()
// //
// Documents functions that expect a lock to be held on entry to the function, // Documents functions that expect a lock to be held on entry to the function,
// and release it in the body of the function. // and release it in the body of the function.
#define ABSL_UNLOCK_FUNCTION(...) \ #if ABSL_HAVE_ATTRIBUTE(unlock_function)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__)) #define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#else
#define ABSL_UNLOCK_FUNCTION(...)
#endif
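For illustration (not part of this change), the lockable and lock-function annotations above combine on a hand-rolled RAII guard roughly as follows; `MyMutex` and `MyLockGuard` are hypothetical types used only for this sketch:

class ABSL_LOCKABLE MyMutex {
 public:
  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {}
  void Unlock() ABSL_UNLOCK_FUNCTION() {}
};

class ABSL_SCOPED_LOCKABLE MyLockGuard {
 public:
  // The constructor acquires the lock and the destructor releases it, so the
  // analysis treats the guard's lifetime as the critical section.
  explicit MyLockGuard(MyMutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu_->Lock();
  }
  ~MyLockGuard() ABSL_UNLOCK_FUNCTION() { mu_->Unlock(); }

 private:
  MyMutex* const mu_;
};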
// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() // ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION()
// //
@ -199,31 +236,49 @@
// success, or `false` for functions that return `false` on success. The second // success, or `false` for functions that return `false` on success. The second
// argument specifies the mutex that is locked on success. If unspecified, this // argument specifies the mutex that is locked on success. If unspecified, this
// mutex is assumed to be `this`. // mutex is assumed to be `this`.
#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function)
#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ __attribute__((exclusive_trylock_function(__VA_ARGS__)))
exclusive_trylock_function(__VA_ARGS__)) #else
#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...)
#endif
#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ #if ABSL_HAVE_ATTRIBUTE(shared_trylock_function)
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ #define ABSL_SHARED_TRYLOCK_FUNCTION(...) \
shared_trylock_function(__VA_ARGS__)) __attribute__((shared_trylock_function(__VA_ARGS__)))
#else
#define ABSL_SHARED_TRYLOCK_FUNCTION(...)
#endif
// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() // ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK()
// //
// Documents functions that dynamically check to see if a lock is held, and fail // Documents functions that dynamically check to see if a lock is held, and fail
// if it is not held. // if it is not held.
#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock)
#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__)) __attribute__((assert_exclusive_lock(__VA_ARGS__)))
#else
#define ABSL_ASSERT_EXCLUSIVE_LOCK(...)
#endif
#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock)
#define ABSL_ASSERT_SHARED_LOCK(...) \ #define ABSL_ASSERT_SHARED_LOCK(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__)) __attribute__((assert_shared_lock(__VA_ARGS__)))
#else
#define ABSL_ASSERT_SHARED_LOCK(...)
#endif
// ABSL_NO_THREAD_SAFETY_ANALYSIS // ABSL_NO_THREAD_SAFETY_ANALYSIS
// //
// Turns off thread safety checking within the body of a particular function. // Turns off thread safety checking within the body of a particular function.
// This annotation is used to mark functions that are known to be correct, but // This annotation is used to mark functions that are known to be correct, but
// the locking behavior is more complicated than the analyzer can handle. // the locking behavior is more complicated than the analyzer can handle.
#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis)
#define ABSL_NO_THREAD_SAFETY_ANALYSIS \ #define ABSL_NO_THREAD_SAFETY_ANALYSIS \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis) __attribute__((no_thread_safety_analysis))
#else
#define ABSL_NO_THREAD_SAFETY_ANALYSIS
#endif
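A hedged sketch (not part of this change) of the data-member and suppression annotations working together, assuming `absl::Mutex`/`absl::MutexLock` from absl/synchronization/mutex.h; `Counter` and `UnsafeReset` are hypothetical names:

class Counter {
 public:
  void Increment() ABSL_LOCKS_EXCLUDED(mu_) {
    absl::MutexLock lock(&mu_);
    ++counter_;
  }

  // Called only during single-threaded start-up; skipping the lock is known to
  // be safe even though the analyzer cannot prove it.
  void UnsafeReset() ABSL_NO_THREAD_SAFETY_ANALYSIS { counter_ = 0; }

 private:
  absl::Mutex mu_;
  int counter_ ABSL_GUARDED_BY(mu_) = 0;
};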
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Tool-Supplied Annotations // Tool-Supplied Annotations
@ -1,38 +0,0 @@
#
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates config_setting that allows selecting based on 'compiler' value."""
def create_llvm_config(name, visibility):
# The "do_not_use_tools_cpp_compiler_present" attribute exists to
# distinguish between older versions of Bazel that do not support
# "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
# In the future, the only way to select on the compiler will be through
# flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
# be removed.
if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
native.config_setting(
name = name,
flag_values = {
"@bazel_tools//tools/cpp:compiler": "llvm",
},
visibility = visibility,
)
else:
native.config_setting(
name = name,
values = {"compiler": "llvm"},
visibility = visibility,
)
@ -24,7 +24,7 @@ load(
package(default_visibility = ["//visibility:public"]) package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0 licenses(["notice"])
cc_library( cc_library(
name = "compressed_tuple", name = "compressed_tuple",
@ -60,6 +60,7 @@ cc_library(
deps = [ deps = [
":compressed_tuple", ":compressed_tuple",
"//absl/algorithm", "//absl/algorithm",
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:dynamic_annotations", "//absl/base:dynamic_annotations",
"//absl/base:throw_delegate", "//absl/base:throw_delegate",
@ -368,6 +369,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS, copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
"//absl/base:config",
"//absl/memory", "//absl/memory",
"//absl/meta:type_traits", "//absl/meta:type_traits",
"//absl/utility", "//absl/utility",
@ -620,6 +622,7 @@ cc_test(
":hashtable_debug", ":hashtable_debug",
":raw_hash_set", ":raw_hash_set",
"//absl/base", "//absl/base",
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:raw_logging_internal", "//absl/base:raw_logging_internal",
"//absl/strings", "//absl/strings",
@ -647,6 +650,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS, copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/meta:type_traits", "//absl/meta:type_traits",
"//absl/strings", "//absl/strings",
@ -665,6 +669,7 @@ cc_test(
visibility = ["//visibility:private"], visibility = ["//visibility:private"],
deps = [ deps = [
":layout", ":layout",
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:raw_logging_internal", "//absl/base:raw_logging_internal",
"//absl/types:span", "//absl/types:span",
@ -131,6 +131,7 @@ absl_cc_library(
DEPS DEPS
absl::compressed_tuple absl::compressed_tuple
absl::algorithm absl::algorithm
absl::config
absl::core_headers absl::core_headers
absl::dynamic_annotations absl::dynamic_annotations
absl::throw_delegate absl::throw_delegate
@ -423,6 +424,7 @@ absl_cc_library(
COPTS COPTS
${ABSL_DEFAULT_COPTS} ${ABSL_DEFAULT_COPTS}
DEPS DEPS
absl::config
absl::memory absl::memory
absl::type_traits absl::type_traits
absl::utility absl::utility
@ -696,6 +698,7 @@ absl_cc_test(
absl::hashtable_debug absl::hashtable_debug
absl::raw_hash_set absl::raw_hash_set
absl::base absl::base
absl::config
absl::core_headers absl::core_headers
absl::raw_logging_internal absl::raw_logging_internal
absl::strings absl::strings
@ -724,6 +727,7 @@ absl_cc_library(
COPTS COPTS
${ABSL_DEFAULT_COPTS} ${ABSL_DEFAULT_COPTS}
DEPS DEPS
absl::config
absl::core_headers absl::core_headers
absl::meta absl::meta
absl::strings absl::strings
@ -741,6 +745,7 @@ absl_cc_test(
${ABSL_TEST_COPTS} ${ABSL_TEST_COPTS}
DEPS DEPS
absl::layout absl::layout
absl::config
absl::core_headers absl::core_headers
absl::raw_logging_internal absl::raw_logging_internal
absl::span absl::span
@ -185,7 +185,7 @@ class btree_map
// template <typename K> size_type erase(const K& key): // template <typename K> size_type erase(const K& key):
// //
// Erases the element with the matching key, if it exists, returning the // Erases the element with the matching key, if it exists, returning the
// number of elements erased. // number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// btree_map::insert() // btree_map::insert()
@ -325,6 +325,11 @@ class btree_map
// does not contain an element with a matching key, this function returns an // does not contain an element with a matching key, this function returns an
// empty node handle. // empty node handle.
// //
// NOTE: when compiled in an earlier version of C++ than C++17,
// `node_type::key()` returns a const reference to the key instead of a
// mutable reference. We cannot safely return a mutable reference without
// std::launder (which is not available before C++17).
//
// NOTE: In this context, `node_type` refers to the C++17 concept of a // NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative // move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle). // containers (https://en.cppreference.com/w/cpp/container/node_handle).
@ -652,6 +657,11 @@ class btree_multimap
// does not contain an element with a matching key, this function returns an // does not contain an element with a matching key, this function returns an
// empty node handle. // empty node handle.
// //
// NOTE: when compiled in an earlier version of C++ than C++17,
// `node_type::key()` returns a const reference to the key instead of a
// mutable reference. We cannot safely return a mutable reference without
// std::launder (which is not available before C++17).
//
// NOTE: In this context, `node_type` refers to the C++17 concept of a // NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative // move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle). // containers (https://en.cppreference.com/w/cpp/container/node_handle).
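For illustration (not part of this change), the node-handle path this note is about: renaming a key in place without copying the mapped value. `RenameKey` and the literal keys are hypothetical, and the sketch assumes absl/container/btree_map.h:

#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
void RenameKey(absl::btree_map<std::string, int>& m) {
  auto it = m.find("old_name");
  if (it == m.end()) return;
  auto nh = m.extract(it);   // the node handle now owns the element
  nh.key() = "new_name";     // mutable key access; needs C++17 per the note
  m.insert(std::move(nh));   // re-inserted without copying the mapped value
}
#endif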
@ -183,7 +183,7 @@ class btree_set
// template <typename K> size_type erase(const K& key): // template <typename K> size_type erase(const K& key):
// //
// Erases the element with the matching key, if it exists, returning the // Erases the element with the matching key, if it exists, returning the
// number of elements erased. // number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// btree_set::insert() // btree_set::insert()
@ -15,6 +15,7 @@
#include "absl/container/btree_test.h" #include "absl/container/btree_test.h"
#include <cstdint> #include <cstdint>
#include <limits>
#include <map> #include <map>
#include <memory> #include <memory>
#include <stdexcept> #include <stdexcept>
@ -52,7 +53,9 @@ using ::absl::test_internal::MovableOnlyInstance;
using ::testing::ElementsAre; using ::testing::ElementsAre;
using ::testing::ElementsAreArray; using ::testing::ElementsAreArray;
using ::testing::IsEmpty; using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Pair; using ::testing::Pair;
using ::testing::SizeIs;
template <typename T, typename U> template <typename T, typename U>
void CheckPairEquals(const T &x, const U &y) { void CheckPairEquals(const T &x, const U &y) {
@ -1180,6 +1183,103 @@ TEST(Btree, RangeCtorSanity) {
EXPECT_EQ(1, tmap.size()); EXPECT_EQ(1, tmap.size());
} }
} // namespace
class BtreeNodePeer {
public:
// Yields the size of a leaf node with a specific number of values.
template <typename ValueType>
constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
return btree_node<
set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
/*TargetNodeSize=*/256, // This parameter isn't used here.
/*Multi=*/false>>::SizeWithNValues(target_values_per_node);
}
// Yields the number of values in a (non-root) leaf node for this btree.
template <typename Btree>
constexpr static size_t GetNumValuesPerNode() {
return btree_node<typename Btree::params_type>::kNodeValues;
}
template <typename Btree>
constexpr static size_t GetMaxFieldType() {
return std::numeric_limits<
typename btree_node<typename Btree::params_type>::field_type>::max();
}
template <typename Btree>
constexpr static bool UsesLinearNodeSearch() {
return btree_node<typename Btree::params_type>::use_linear_search::value;
}
};
namespace {
class BtreeMapTest : public ::testing::Test {
public:
struct Key {};
struct Cmp {
template <typename T>
bool operator()(T, T) const {
return false;
}
};
struct KeyLin {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct CmpLin : Cmp {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct KeyBin {
using absl_btree_prefer_linear_node_search = std::false_type;
};
struct CmpBin : Cmp {
using absl_btree_prefer_linear_node_search = std::false_type;
};
template <typename K, typename C>
static bool IsLinear() {
return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
}
};
TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
// Test requesting linear search by directly exporting an alias.
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
}
TEST_F(BtreeMapTest, LinearChoiceTree) {
// Cmp has precedence, and is forcing binary
EXPECT_FALSE((IsLinear<Key, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
EXPECT_FALSE((IsLinear<int, CmpBin>()));
EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
// Cmp has precedence, and is forcing linear
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
EXPECT_TRUE((IsLinear<int, CmpLin>()));
EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
// Cmp has no preference, Key determines linear vs binary.
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
// arithmetic key w/ std::less or std::greater: linear
EXPECT_TRUE((IsLinear<int, std::less<int>>()));
EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
// arithmetic key w/ custom compare: binary
EXPECT_FALSE((IsLinear<int, Cmp>()));
// non-arithmetic key: binary
EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
}
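For illustration (not part of this change), how a cheap-to-compare key type opts into linear in-node search, which is what the tests above exercise; `SmallId` is a hypothetical type, and the alias requires <type_traits>:

struct SmallId {
  // Opt-in alias the btree checks; a preference on the comparator, if any,
  // takes precedence over one on the key.
  using absl_btree_prefer_linear_node_search = std::true_type;

  int value;
  friend bool operator<(const SmallId& a, const SmallId& b) {
    return a.value < b.value;
  }
};
// absl::btree_set<SmallId> will then use linear search within each node.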
TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
absl::btree_map<std::string, std::unique_ptr<std::string>> m; absl::btree_map<std::string, std::unique_ptr<std::string>> m;
@ -1325,28 +1425,6 @@ TEST(Btree, RValueInsert) {
EXPECT_EQ(tracker.swaps(), 0); EXPECT_EQ(tracker.swaps(), 0);
} }
} // namespace
class BtreeNodePeer {
public:
// Yields the size of a leaf node with a specific number of values.
template <typename ValueType>
constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
return btree_node<
set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
/*TargetNodeSize=*/256, // This parameter isn't used here.
/*Multi=*/false>>::SizeWithNValues(target_values_per_node);
}
// Yields the number of values in a (non-root) leaf node for this set.
template <typename Set>
constexpr static size_t GetNumValuesPerNode() {
return btree_node<typename Set::params_type>::kNodeValues;
}
};
namespace {
// A btree set with a specific number of values per node. // A btree set with a specific number of values per node.
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>> template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet class SizedBtreeSet
@ -2101,6 +2179,31 @@ TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
Pair(4, 1), Pair(4, 4), Pair(5, 5))); Pair(4, 1), Pair(4, 4), Pair(5, 5)));
} }
TEST(Btree, MergeIntoSetMovableOnly) {
absl::btree_set<MovableOnlyInstance> src;
src.insert(MovableOnlyInstance(1));
absl::btree_multiset<MovableOnlyInstance> dst1;
dst1.insert(MovableOnlyInstance(2));
absl::btree_set<MovableOnlyInstance> dst2;
// Test merge into multiset.
dst1.merge(src);
EXPECT_TRUE(src.empty());
// ElementsAre/ElementsAreArray don't work with move-only types.
ASSERT_THAT(dst1, SizeIs(2));
EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
// Test merge into set.
dst2.merge(dst1);
EXPECT_TRUE(dst1.empty());
ASSERT_THAT(dst2, SizeIs(2));
EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
}
struct KeyCompareToWeakOrdering { struct KeyCompareToWeakOrdering {
template <typename T> template <typename T>
absl::weak_ordering operator()(const T &a, const T &b) const { absl::weak_ordering operator()(const T &a, const T &b) const {
@ -2404,6 +2507,320 @@ TEST(Btree, BitfieldArgument) {
m[n]; m[n];
} }
TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) {
const absl::string_view names[] = {"n1", "n2"};
absl::btree_set<std::string> name_set1{std::begin(names), std::end(names)};
EXPECT_THAT(name_set1, ElementsAreArray(names));
absl::btree_set<std::string> name_set2;
name_set2.insert(std::begin(names), std::end(names));
EXPECT_THAT(name_set2, ElementsAreArray(names));
}
// A type that is explicitly convertible from int and counts constructor calls.
struct ConstructorCounted {
explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
bool operator==(int other) const { return i == other; }
int i;
static int constructor_calls;
};
int ConstructorCounted::constructor_calls = 0;
struct ConstructorCountedCompare {
bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
bool operator()(const ConstructorCounted &a,
const ConstructorCounted &b) const {
return a.i < b.i;
}
using is_transparent = void;
};
TEST(Btree,
SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
const int i[] = {0, 1, 1};
ConstructorCounted::constructor_calls = 0;
absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
std::begin(i), std::end(i)};
EXPECT_THAT(set, ElementsAre(0, 1));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
set.insert(std::begin(i), std::end(i));
EXPECT_THAT(set, ElementsAre(0, 1));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
}
TEST(Btree,
SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
const int i[] = {0, 1};
absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
absl::btree_set<std::vector<void *>> s2;
s2.insert(std::begin(i), std::end(i));
EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
}
// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that
// prevents explicit conversions between pair types.
// We only run this test for the libstdc++ from GCC 7 or newer because we can't
// reliably check the libstdc++ version prior to that release.
#if !defined(__GLIBCXX__) || \
(defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7)
TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) {
const std::pair<absl::string_view, int> names[] = {{"n1", 1}, {"n2", 2}};
absl::btree_map<std::string, int> name_map1{std::begin(names),
std::end(names)};
EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
absl::btree_map<std::string, int> name_map2;
name_map2.insert(std::begin(names), std::end(names));
EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
}
TEST(Btree,
MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
const std::pair<int, int> i[] = {{0, 1}, {1, 2}, {1, 3}};
ConstructorCounted::constructor_calls = 0;
absl::btree_map<ConstructorCounted, int, ConstructorCountedCompare> map{
std::begin(i), std::end(i)};
EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
map.insert(std::begin(i), std::end(i));
EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
}
TEST(Btree,
MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
const std::pair<int, int> i[] = {{0, 1}, {1, 2}};
absl::btree_map<std::vector<void *>, int> m1{std::begin(i), std::end(i)};
EXPECT_THAT(m1,
ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
absl::btree_map<std::vector<void *>, int> m2;
m2.insert(std::begin(i), std::end(i));
EXPECT_THAT(m2,
ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
}
TEST(Btree, HeterogeneousTryEmplace) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m.try_emplace(sv, 1);
EXPECT_EQ(m[s], 1);
m.try_emplace(m.end(), sv, 2);
EXPECT_EQ(m[s], 1);
}
TEST(Btree, HeterogeneousOperatorMapped) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m[sv] = 1;
EXPECT_EQ(m[s], 1);
m[sv] = 2;
EXPECT_EQ(m[s], 2);
}
TEST(Btree, HeterogeneousInsertOrAssign) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m.insert_or_assign(sv, 1);
EXPECT_EQ(m[s], 1);
m.insert_or_assign(m.end(), sv, 2);
EXPECT_EQ(m[s], 2);
}
#endif
// This test requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(Btree, NodeHandleMutableKeyAccess) {
{
absl::btree_map<std::string, std::string> map;
map["key1"] = "mapped";
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
}
// Also for multimap.
{
absl::btree_multimap<std::string, std::string> map;
map.emplace("key1", "mapped");
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
}
}
#endif
struct MultiKey {
int i1;
int i2;
};
bool operator==(const MultiKey a, const MultiKey b) {
return a.i1 == b.i1 && a.i2 == b.i2;
}
// A heterogeneous comparator that has different equivalence classes for
// different lookup types.
struct MultiKeyComp {
using is_transparent = void;
bool operator()(const MultiKey a, const MultiKey b) const {
if (a.i1 != b.i1) return a.i1 < b.i1;
return a.i2 < b.i2;
}
bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
};
TEST(Btree, MultiKeyEqualRange) {
absl::btree_set<MultiKey, MultiKeyComp> set;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
set.insert({i, j});
}
}
for (int i = 0; i < 100; ++i) {
auto equal_range = set.equal_range(i);
EXPECT_EQ(equal_range.first->i1, i);
EXPECT_EQ(equal_range.first->i2, 0);
EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
}
}
TEST(Btree, MultiKeyErase) {
absl::btree_set<MultiKey, MultiKeyComp> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.erase(2), 2);
EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
}
TEST(Btree, MultiKeyCount) {
const absl::btree_set<MultiKey, MultiKeyComp> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.count(2), 2);
}
TEST(Btree, AllocConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set(alloc);
set.insert({1, 2, 3});
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocInitializerListConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set({1, 2, 3}, alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocRangeConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
std::vector<int> v = {1, 2, 3};
Set set(v.begin(), v.end(), alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocCopyConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(set1, alloc2);
EXPECT_THAT(set1, ElementsAre(1, 2, 3));
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
EXPECT_EQ(bytes_used1, bytes_used2);
}
TEST(Btree, AllocMoveConstructor_SameAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set1(alloc);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
Set set2(std::move(set1), alloc);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_EQ(bytes_used, original_bytes_used);
}
TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used1;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(std::move(set1), alloc2);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
// We didn't free these bytes allocated by `set1` yet.
EXPECT_EQ(bytes_used1, original_bytes_used);
EXPECT_EQ(bytes_used2, original_bytes_used);
}
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
@ -41,6 +41,7 @@
#include <type_traits> #include <type_traits>
#include "absl/algorithm/algorithm.h" #include "absl/algorithm/algorithm.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h" #include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/throw_delegate.h" #include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h" #include "absl/base/macros.h"
@ -231,8 +232,8 @@ class FixedArray {
// FixedArray::at // FixedArray::at
// //
// Bounds-checked access. Returns a reference to the ith element of the // Bounds-checked access. Returns a reference to the ith element of the fixed
// fiexed array, or throws std::out_of_range // array, or throws std::out_of_range
reference at(size_type i) { reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) { if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
@ -422,15 +423,15 @@ class FixedArray {
void AnnotateConstruct(size_type n); void AnnotateConstruct(size_type n);
void AnnotateDestruct(size_type n); void AnnotateDestruct(size_type n);
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
void* RedzoneBegin() { return &redzone_begin_; } void* RedzoneBegin() { return &redzone_begin_; }
void* RedzoneEnd() { return &redzone_end_ + 1; } void* RedzoneEnd() { return &redzone_end_ + 1; }
#endif // ADDRESS_SANITIZER #endif // ABSL_HAVE_ADDRESS_SANITIZER
private: private:
ADDRESS_SANITIZER_REDZONE(redzone_begin_); ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
ADDRESS_SANITIZER_REDZONE(redzone_end_); ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
}; };
class EmptyInlinedStorage { class EmptyInlinedStorage {
@ -503,22 +504,26 @@ constexpr typename FixedArray<T, N, A>::size_type
template <typename T, size_t N, typename A> template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct( void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) { typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return; if (!n) return;
ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); data() + n);
#endif // ADDRESS_SANITIZER ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
RedzoneBegin());
#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode static_cast<void>(n); // Mark used when not in asan mode
} }
template <typename T, size_t N, typename A> template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct( void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) { typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return; if (!n) return;
ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); RedzoneEnd());
#endif // ADDRESS_SANITIZER ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
data());
#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode static_cast<void>(n); // Mark used when not in asan mode
} }
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
@ -150,8 +150,7 @@ TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
template <typename FixedArrT> template <typename FixedArrT>
testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) { testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
// Marked volatile to prevent optimization. Used for running asan tests. int sum = 0;
volatile int sum = 0;
for (const auto& thrower : *fixed_arr) { for (const auto& thrower : *fixed_arr) {
sum += thrower.Get(); sum += thrower.Get();
} }
@ -27,6 +27,7 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h" #include "absl/base/internal/exception_testing.h"
#include "absl/base/options.h" #include "absl/base/options.h"
#include "absl/container/internal/counting_allocator.h" #include "absl/container/internal/counting_allocator.h"
@ -767,7 +768,7 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
} }
} }
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) { TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10); absl::FixedArray<int, 32> a(10);
int* raw = a.data(); int* raw = a.data();
@ -814,7 +815,7 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
// so reading raw[21] should still trigger the correct warning. // so reading raw[21] should still trigger the correct warning.
EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow");
} }
#endif // ADDRESS_SANITIZER #endif // ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AbslHashValueWorks) { TEST(FixedArrayTest, AbslHashValueWorks) {
using V = absl::FixedArray<int>; using V = absl::FixedArray<int>;
@ -234,7 +234,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// //
// size_type erase(const key_type& key): // size_type erase(const key_type& key):
// //
// Erases the element with the matching key, if it exists. // Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// flat_hash_map::insert() // flat_hash_map::insert()
@ -383,6 +384,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// key value and returns a node handle owning that extracted data. If the // key value and returns a node handle owning that extracted data. If the
// `flat_hash_map` does not contain an element with a matching key, this // `flat_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle. // function returns an empty node handle.
//
// NOTE: when compiled in an earlier version of C++ than C++17,
// `node_type::key()` returns a const reference to the key instead of a
// mutable reference. We cannot safely return a mutable reference without
// std::launder (which is not available before C++17).
using Base::extract; using Base::extract;
// flat_hash_map::merge() // flat_hash_map::merge()
@ -267,6 +267,21 @@ TEST(FlatHashMap, EraseIf) {
} }
} }
// This test requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
flat_hash_map<std::string, std::string> map;
map["key1"] = "mapped";
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
}
#endif
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
@ -227,7 +227,8 @@ class flat_hash_set
// //
// size_type erase(const key_type& key): // size_type erase(const key_type& key):
// //
// Erases the element with the matching key, if it exists. // Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// flat_hash_set::insert() // flat_hash_set::insert()
@ -323,7 +324,7 @@ class flat_hash_set
// flat_hash_set::merge() // flat_hash_set::merge()
// //
// Extracts elements from a given `source` flat hash map into this // Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an // `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted. // element with an equivalent key, that element is not extracted.
using Base::merge; using Base::merge;
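A minimal sketch (not part of this change) of the merge behavior described above; `MergeSets` is a hypothetical name, assuming absl/container/flat_hash_set.h:

void MergeSets() {
  absl::flat_hash_set<int> src = {1, 2, 3};
  absl::flat_hash_set<int> dst = {3, 4};
  dst.merge(src);
  // dst now holds {1, 2, 3, 4}; src is left holding {3}, because an element
  // with an equivalent key was already in dst and so was not extracted.
}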
@ -64,7 +64,7 @@ ABSL_NAMESPACE_BEGIN
// `std::vector` for use cases where the vector's size is sufficiently small // `std::vector` for use cases where the vector's size is sufficiently small
// that it can be inlined. If the inlined vector does grow beyond its estimated // that it can be inlined. If the inlined vector does grow beyond its estimated
// capacity, it will trigger an initial allocation on the heap, and will behave // capacity, it will trigger an initial allocation on the heap, and will behave
// as a `std:vector`. The API of the `absl::InlinedVector` within this file is // as a `std::vector`. The API of the `absl::InlinedVector` within this file is
// designed to cover the same API footprint as covered by `std::vector`. // designed to cover the same API footprint as covered by `std::vector`.
template <typename T, size_t N, typename A = std::allocator<T>> template <typename T, size_t N, typename A = std::allocator<T>>
class InlinedVector { class InlinedVector {
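For illustration (not part of this change), the inline-then-heap behavior the comment above describes; `InlineThenSpill` is a hypothetical name:

void InlineThenSpill() {
  absl::InlinedVector<int, 4> v = {1, 2, 3, 4};  // fits inline: no heap allocation
  v.push_back(5);  // exceeds the inlined capacity: allocates once, then the
                   // container behaves like std::vector
}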
@ -736,22 +736,26 @@ TEST(OverheadTest, Storage) {
// In particular, ensure that std::allocator doesn't cost anything to store. // In particular, ensure that std::allocator doesn't cost anything to store.
// The union should be absorbing some of the allocation bookkeeping overhead // The union should be absorbing some of the allocation bookkeeping overhead
// in the larger vectors, leaving only the size_ field as overhead. // in the larger vectors, leaving only the size_ field as overhead.
EXPECT_EQ(2 * sizeof(int*),
sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*)); struct T { void* val; };
EXPECT_EQ(1 * sizeof(int*), size_t expected_overhead = sizeof(T);
sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ((2 * expected_overhead),
sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*)); sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*)); sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*)); sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*)); sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*)); sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
EXPECT_EQ(1 * sizeof(int*), EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*)); sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
} }
TEST(IntVec, Clear) { TEST(IntVec, Clear) {
File diff suppressed because it is too large.
@ -23,6 +23,7 @@
#include "absl/base/internal/throw_delegate.h" #include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h" #include "absl/container/internal/common.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"
namespace absl { namespace absl {
@ -68,8 +69,21 @@ class btree_container {
explicit btree_container(const key_compare &comp, explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type()) const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {} : tree_(comp, alloc) {}
btree_container(const btree_container &other) = default; explicit btree_container(const allocator_type &alloc)
btree_container(btree_container &&other) noexcept = default; : tree_(key_compare(), alloc) {}
btree_container(const btree_container &other)
: btree_container(other, absl::allocator_traits<allocator_type>::
select_on_container_copy_construction(
other.get_allocator())) {}
btree_container(const btree_container &other, const allocator_type &alloc)
: tree_(other.tree_, alloc) {}
btree_container(btree_container &&other) noexcept(
std::is_nothrow_move_constructible<Tree>::value) = default;
btree_container(btree_container &&other, const allocator_type &alloc)
: tree_(std::move(other.tree_), alloc) {}
btree_container &operator=(const btree_container &other) = default; btree_container &operator=(const btree_container &other) = default;
btree_container &operator=(btree_container &&other) noexcept( btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default; std::is_nothrow_move_assignable<Tree>::value) = default;
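The new allocator-extended constructors above follow the standard allocator-propagation protocol: the plain copy constructor delegates to the allocator-extended one, asking select_on_container_copy_construction which allocator the copy should use. A minimal sketch of that pattern on a hypothetical ToyContainer backed by std::vector (not an Abseil type):

#include <memory>
#include <vector>

template <typename T, typename A = std::allocator<T>>
class ToyContainer {
 public:
  ToyContainer() = default;
  explicit ToyContainer(const A& alloc) : data_(alloc) {}

  // Copy: ask the source allocator which allocator the copy should use.
  ToyContainer(const ToyContainer& other)
      : ToyContainer(other,
                     std::allocator_traits<A>::
                         select_on_container_copy_construction(
                             other.data_.get_allocator())) {}

  // Allocator-extended copy.
  ToyContainer(const ToyContainer& other, const A& alloc)
      : data_(other.data_, alloc) {}

 private:
  std::vector<T, A> data_;
};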
@ -90,6 +104,11 @@ class btree_container {
// Lookup routines. // Lookup routines.
template <typename K = key_type> template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
auto equal_range = this->equal_range(key);
return std::distance(equal_range.first, equal_range.second);
}
template <typename K = key_type>
iterator find(const key_arg<K> &key) { iterator find(const key_arg<K> &key) {
return tree_.find(key); return tree_.find(key);
} }
@ -138,6 +157,11 @@ class btree_container {
iterator erase(const_iterator first, const_iterator last) { iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second; return tree_.erase_range(iterator(first), iterator(last)).second;
} }
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
auto equal_range = this->equal_range(key);
return tree_.erase_range(equal_range.first, equal_range.second).first;
}
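The count() and erase(key) overloads added above are both expressed through equal_range, which keeps them correct for multi-containers and for heterogeneous keys. A minimal sketch of the same idea on std::map with a transparent comparator (an illustration, not the b-tree implementation):

#include <cstddef>
#include <iterator>
#include <map>
#include <string>

template <typename Map, typename K>
size_t CountKey(const Map& m, const K& key) {
  auto range = m.equal_range(key);
  return std::distance(range.first, range.second);
}

template <typename Map, typename K>
size_t EraseKey(Map& m, const K& key) {
  auto range = m.equal_range(key);
  const size_t n = std::distance(range.first, range.second);
  m.erase(range.first, range.second);
  return n;
}

int main() {
  std::map<std::string, int, std::less<>> m{{"a", 1}, {"b", 2}};
  CountKey(m, "a");  // heterogeneous lookup via const char*, no temporary string
  return static_cast<int>(EraseKey(m, "a"));
}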
// Extract routines. // Extract routines.
node_type extract(iterator position) { node_type extract(iterator position) {
@ -151,7 +175,6 @@ class btree_container {
return extract(iterator(position)); return extract(iterator(position));
} }
public:
// Utility routines. // Utility routines.
void clear() { tree_.clear(); } void clear() { tree_.clear(); }
void swap(btree_container &other) { tree_.swap(other.tree_); } void swap(btree_container &other) { tree_.swap(other.tree_); }
@ -235,7 +258,7 @@ class btree_set_container : public btree_container<Tree> {
using super_type::super_type; using super_type::super_type;
btree_set_container() {} btree_set_container() {}
// Range constructor. // Range constructors.
template <class InputIterator> template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e, btree_set_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(), const key_compare &comp = key_compare(),
@ -243,18 +266,19 @@ class btree_set_container : public btree_container<Tree> {
: super_type(comp, alloc) { : super_type(comp, alloc) {
insert(b, e); insert(b, e);
} }
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_set_container(b, e, key_compare(), alloc) {}
// Initializer list constructor. // Initializer list constructors.
btree_set_container(std::initializer_list<init_type> init, btree_set_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(), const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type()) const allocator_type &alloc = allocator_type())
: btree_set_container(init.begin(), init.end(), comp, alloc) {} : btree_set_container(init.begin(), init.end(), comp, alloc) {}
btree_set_container(std::initializer_list<init_type> init,
// Lookup routines. const allocator_type &alloc)
template <typename K = key_type> : btree_set_container(init.begin(), init.end(), alloc) {}
size_type count(const key_arg<K> &key) const {
return this->tree_.count_unique(key);
}
// Insertion routines. // Insertion routines.
std::pair<iterator, bool> insert(const value_type &v) { std::pair<iterator, bool> insert(const value_type &v) {
@ -268,31 +292,29 @@ class btree_set_container : public btree_container<Tree> {
init_type v(std::forward<Args>(args)...); init_type v(std::forward<Args>(args)...);
return this->tree_.insert_unique(params_type::key(v), std::move(v)); return this->tree_.insert_unique(params_type::key(v), std::move(v));
} }
iterator insert(const_iterator position, const value_type &v) { iterator insert(const_iterator hint, const value_type &v) {
return this->tree_ return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v), v) .insert_hint_unique(iterator(hint), params_type::key(v), v)
.first; .first;
} }
iterator insert(const_iterator position, value_type &&v) { iterator insert(const_iterator hint, value_type &&v) {
return this->tree_ return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v), .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
std::move(v))
.first; .first;
} }
template <typename... Args> template <typename... Args>
iterator emplace_hint(const_iterator position, Args &&... args) { iterator emplace_hint(const_iterator hint, Args &&... args) {
init_type v(std::forward<Args>(args)...); init_type v(std::forward<Args>(args)...);
return this->tree_ return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v), .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
std::move(v))
.first; .first;
} }
template <typename InputIterator> template <typename InputIterator>
void insert(InputIterator b, InputIterator e) { void insert(InputIterator b, InputIterator e) {
this->tree_.insert_iterator_unique(b, e); this->tree_.insert_iterator_unique(b, e, 0);
} }
void insert(std::initializer_list<init_type> init) { void insert(std::initializer_list<init_type> init) {
this->tree_.insert_iterator_unique(init.begin(), init.end()); this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
} }
insert_return_type insert(node_type &&node) { insert_return_type insert(node_type &&node) {
if (!node) return {this->end(), false, node_type()}; if (!node) return {this->end(), false, node_type()};
@ -315,14 +337,10 @@ class btree_set_container : public btree_container<Tree> {
return res.first; return res.first;
} }
// Deletion routines.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_unique(key);
}
using super_type::erase;
// Node extraction routines. // Node extraction routines.
// TODO(ezb): when the comparator is heterogeneous and has different
// equivalence classes for different lookup types, we should extract the first
// equivalent value if there are multiple.
template <typename K = key_type> template <typename K = key_type>
node_type extract(const key_arg<K> &key) { node_type extract(const key_arg<K> &key) {
auto it = this->find(key); auto it = this->find(key);
@ -344,7 +362,7 @@ class btree_set_container : public btree_container<Tree> {
int> = 0> int> = 0>
void merge(btree_container<T> &src) { // NOLINT void merge(btree_container<T> &src) { // NOLINT
for (auto src_it = src.begin(); src_it != src.end();) { for (auto src_it = src.begin(); src_it != src.end();) {
if (insert(std::move(*src_it)).second) { if (insert(std::move(params_type::element(src_it.slot()))).second) {
src_it = src.erase(src_it); src_it = src.erase(src_it);
} else { } else {
++src_it; ++src_it;
@ -371,6 +389,7 @@ template <typename Tree>
class btree_map_container : public btree_set_container<Tree> { class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>; using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type; using params_type = typename Tree::params_type;
friend class BtreeNodePeer;
private: private:
template <class K> template <class K>
@ -392,111 +411,72 @@ class btree_map_container : public btree_set_container<Tree> {
// Insertion routines. // Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow // Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments. // for supporting bitfield arguments.
// Note: when we call `std::forward<M>(obj)` twice, it's safe because template <typename K = key_type, class M>
// insert_unique/insert_hint_unique are guaranteed to not consume `obj` when std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
// `ret.second` is false. const M &obj) {
template <class M> return insert_or_assign_impl(k, obj);
std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
if (!ret.second) ret.first->second = obj;
return ret;
} }
template <class M, key_type * = nullptr> template <typename K = key_type, class M, K * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) { std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
const std::pair<iterator, bool> ret = return insert_or_assign_impl(std::forward<K>(k), obj);
this->tree_.insert_unique(k, std::move(k), obj);
if (!ret.second) ret.first->second = obj;
return ret;
} }
template <class M, M * = nullptr> template <typename K = key_type, class M, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) { std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
const std::pair<iterator, bool> ret = return insert_or_assign_impl(k, std::forward<M>(obj));
this->tree_.insert_unique(k, k, std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
} }
template <class M, key_type * = nullptr, M * = nullptr> template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) { std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
const std::pair<iterator, bool> ret = return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
} }
template <class M> template <typename K = key_type, class M>
iterator insert_or_assign(const_iterator position, const key_type &k, iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
const M &obj) { const M &obj) {
const std::pair<iterator, bool> ret = return insert_or_assign_hint_impl(hint, k, obj);
this->tree_.insert_hint_unique(iterator(position), k, k, obj);
if (!ret.second) ret.first->second = obj;
return ret.first;
} }
template <class M, key_type * = nullptr> template <typename K = key_type, class M, K * = nullptr>
iterator insert_or_assign(const_iterator position, key_type &&k, iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
const M &obj) { return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(position), k, std::move(k), obj);
if (!ret.second) ret.first->second = obj;
return ret.first;
} }
template <class M, M * = nullptr> template <typename K = key_type, class M, M * = nullptr>
iterator insert_or_assign(const_iterator position, const key_type &k, iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
M &&obj) { return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(position), k, k, std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
} }
template <class M, key_type * = nullptr, M * = nullptr> template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) { iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique( return insert_or_assign_hint_impl(hint, std::forward<K>(k),
iterator(position), k, std::move(k), std::forward<M>(obj)); std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
} }
template <typename... Args>
std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) { template <typename K = key_type, typename... Args,
return this->tree_.insert_unique( typename absl::enable_if_t<
k, std::piecewise_construct, std::forward_as_tuple(k), !std::is_convertible<K, const_iterator>::value, int> = 0>
std::forward_as_tuple(std::forward<Args>(args)...)); std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
return try_emplace_impl(k, std::forward<Args>(args)...);
} }
template <typename... Args> template <typename K = key_type, typename... Args,
std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) { typename absl::enable_if_t<
// Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` !std::is_convertible<K, const_iterator>::value, int> = 0>
// and then using `k` unsequenced. This is safe because the move is into a std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
// forwarding reference and insert_unique guarantees that `key` is never return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
// referenced after consuming `args`.
const key_type &key_ref = k;
return this->tree_.insert_unique(
key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
} }
template <typename... Args> template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, const key_type &k, iterator try_emplace(const_iterator hint, const key_arg<K> &k,
Args &&... args) { Args &&... args) {
return this->tree_ return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
.insert_hint_unique(iterator(hint), k, std::piecewise_construct,
std::forward_as_tuple(k),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
} }
template <typename... Args> template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
// Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` return try_emplace_hint_impl(hint, std::forward<K>(k),
// and then using `k` unsequenced. This is safe because the move is into a std::forward<Args>(args)...);
// forwarding reference and insert_hint_unique guarantees that `key` is
// never referenced after consuming `args`.
const key_type &key_ref = k;
return this->tree_
.insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
std::forward_as_tuple(std::move(k)),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
} }
mapped_type &operator[](const key_type &k) {
template <typename K = key_type>
mapped_type &operator[](const key_arg<K> &k) {
return try_emplace(k).first->second; return try_emplace(k).first->second;
} }
mapped_type &operator[](key_type &&k) { template <typename K = key_type>
return try_emplace(std::move(k)).first->second; mapped_type &operator[](key_arg<K> &&k) {
return try_emplace(std::forward<K>(k)).first->second;
} }
template <typename K = key_type> template <typename K = key_type>
@ -513,6 +493,40 @@ class btree_map_container : public btree_set_container<Tree> {
base_internal::ThrowStdOutOfRange("absl::btree_map::at"); base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second; return it->second;
} }
private:
// Note: when we call `std::forward<M>(obj)` twice, it's safe because
// insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
// `ret.second` is false.
template <class K, class M>
std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
}
template <class K, class M>
iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
}
template <class K, class... Args>
std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
return this->tree_.insert_unique(
k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
}
template <class K, class... Args>
iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
return this->tree_
.insert_hint_unique(iterator(hint), k, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
}
}; };
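The private insert_or_assign_impl above forwards obj twice; as the comment notes, this is safe only because the underlying insert is guaranteed not to consume obj when the key already exists. std::map::try_emplace gives the same guarantee, so the pattern can be sketched against it (an illustration, not the Abseil code path):

#include <map>
#include <string>
#include <utility>

template <typename K, typename V, typename M>
std::pair<typename std::map<K, V>::iterator, bool> InsertOrAssign(
    std::map<K, V>& m, const K& key, M&& obj) {
  auto ret = m.try_emplace(key, std::forward<M>(obj));
  // If the key was already present, try_emplace did not touch `obj`, so it is
  // still valid to forward it into the assignment.
  if (!ret.second) ret.first->second = std::forward<M>(obj);
  return ret;
}

int main() {
  std::map<int, std::string> m;
  std::string v = "hello";
  InsertOrAssign(m, 1, std::move(v));  // inserted; `v` is moved from
  std::string w = "world";
  InsertOrAssign(m, 1, std::move(w));  // assigned; `w` is moved in the assignment
}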
// A common base class for btree_multiset and btree_multimap. // A common base class for btree_multiset and btree_multimap.
@ -540,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
using super_type::super_type; using super_type::super_type;
btree_multiset_container() {} btree_multiset_container() {}
// Range constructor. // Range constructors.
template <class InputIterator> template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e, btree_multiset_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(), const key_compare &comp = key_compare(),
@ -548,29 +562,30 @@ class btree_multiset_container : public btree_container<Tree> {
: super_type(comp, alloc) { : super_type(comp, alloc) {
insert(b, e); insert(b, e);
} }
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_multiset_container(b, e, key_compare(), alloc) {}
// Initializer list constructor. // Initializer list constructors.
btree_multiset_container(std::initializer_list<init_type> init, btree_multiset_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(), const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type()) const allocator_type &alloc = allocator_type())
: btree_multiset_container(init.begin(), init.end(), comp, alloc) {} : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
btree_multiset_container(std::initializer_list<init_type> init,
// Lookup routines. const allocator_type &alloc)
template <typename K = key_type> : btree_multiset_container(init.begin(), init.end(), alloc) {}
size_type count(const key_arg<K> &key) const {
return this->tree_.count_multi(key);
}
// Insertion routines. // Insertion routines.
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
iterator insert(value_type &&v) { iterator insert(value_type &&v) {
return this->tree_.insert_multi(std::move(v)); return this->tree_.insert_multi(std::move(v));
} }
iterator insert(const_iterator position, const value_type &v) { iterator insert(const_iterator hint, const value_type &v) {
return this->tree_.insert_hint_multi(iterator(position), v); return this->tree_.insert_hint_multi(iterator(hint), v);
} }
iterator insert(const_iterator position, value_type &&v) { iterator insert(const_iterator hint, value_type &&v) {
return this->tree_.insert_hint_multi(iterator(position), std::move(v)); return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
} }
template <typename InputIterator> template <typename InputIterator>
void insert(InputIterator b, InputIterator e) { void insert(InputIterator b, InputIterator e) {
@ -584,9 +599,9 @@ class btree_multiset_container : public btree_container<Tree> {
return this->tree_.insert_multi(init_type(std::forward<Args>(args)...)); return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
} }
template <typename... Args> template <typename... Args>
iterator emplace_hint(const_iterator position, Args &&... args) { iterator emplace_hint(const_iterator hint, Args &&... args) {
return this->tree_.insert_hint_multi( return this->tree_.insert_hint_multi(
iterator(position), init_type(std::forward<Args>(args)...)); iterator(hint), init_type(std::forward<Args>(args)...));
} }
iterator insert(node_type &&node) { iterator insert(node_type &&node) {
if (!node) return this->end(); if (!node) return this->end();
@ -605,14 +620,9 @@ class btree_multiset_container : public btree_container<Tree> {
return res; return res;
} }
// Deletion routines.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_multi(key);
}
using super_type::erase;
// Node extraction routines. // Node extraction routines.
// TODO(ezb): we are supposed to extract the first equivalent key if there are
// multiple, but this isn't guaranteed to extract the first one.
template <typename K = key_type> template <typename K = key_type>
node_type extract(const key_arg<K> &key) { node_type extract(const key_arg<K> &key) {
auto it = this->find(key); auto it = this->find(key);
@ -632,8 +642,9 @@ class btree_multiset_container : public btree_container<Tree> {
typename T::params_type::is_map_container>>::value, typename T::params_type::is_map_container>>::value,
int> = 0> int> = 0>
void merge(btree_container<T> &src) { // NOLINT void merge(btree_container<T> &src) { // NOLINT
insert(std::make_move_iterator(src.begin()), for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
std::make_move_iterator(src.end())); insert(std::move(params_type::element(src_it.slot())));
}
src.clear(); src.clear();
} }

View file

@ -146,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
constexpr node_handle() {} constexpr node_handle() {}
auto key() const -> decltype(PolicyTraits::key(std::declval<slot_type*>())) { // When C++17 is available, we can use std::launder to provide mutable
return PolicyTraits::key(this->slot()); // access to the key. Otherwise, we provide const access.
auto key() const
-> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
return PolicyTraits::mutable_key(this->slot());
} }
mapped_type& mapped() const { mapped_type& mapped() const {

View file

@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
template <int I> template <int I>
ElemT<I>& get() & { ElemT<I>& get() & {
return internal_compressed_tuple::Storage<ElemT<I>, I>::get(); return StorageT<I>::get();
} }
template <int I> template <int I>

View file

@ -15,25 +15,27 @@
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ #ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ #define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
#include <cassert> #include <cassert>
#include <cstddef> #include <cstddef>
#include <memory> #include <memory>
#include <new>
#include <tuple> #include <tuple>
#include <type_traits> #include <type_traits>
#include <utility> #include <utility>
#include "absl/base/config.h"
#include "absl/memory/memory.h" #include "absl/memory/memory.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"
#include "absl/utility/utility.h" #include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace container_internal { namespace container_internal {
@ -55,8 +57,11 @@ void* Allocate(Alloc* alloc, size_t n) {
using M = AlignedType<Alignment>; using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>; using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>; using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc); // On macOS, "mem_alloc" is a #define with one argument defined in
void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 && assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment"); "allocator does not respect alignment");
return p; return p;
@ -71,8 +76,11 @@ void Deallocate(Alloc* alloc, void* p, size_t n) {
using M = AlignedType<Alignment>; using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>; using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>; using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc); // On macOS, "mem_alloc" is a #define with one argument defined in
AT::deallocate(mem_alloc, static_cast<M*>(p), // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M)); (n + sizeof(M) - 1) / sizeof(M));
} }
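Allocate/Deallocate above obtain suitably aligned raw storage from an arbitrary allocator by rebinding it to an aligned dummy type and allocating whole blocks of that type. A minimal sketch of the allocation half using std::allocator_traits directly (hypothetical names, not Abseil's):

#include <cstddef>
#include <memory>

template <size_t Alignment>
struct alignas(Alignment) AlignedBlock {
  unsigned char data[Alignment];
};

template <size_t Alignment, typename Alloc>
void* AlignedAllocate(Alloc* alloc, size_t n) {
  using M = AlignedBlock<Alignment>;
  using A = typename std::allocator_traits<Alloc>::template rebind_alloc<M>;
  using AT = typename std::allocator_traits<Alloc>::template rebind_traits<M>;
  A rebound(*alloc);
  // Round up to a whole number of M-sized, M-aligned blocks.
  return AT::allocate(rebound, (n + sizeof(M) - 1) / sizeof(M));
}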
@ -209,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
// Helper functions for asan and msan. // Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s); ASAN_POISON_MEMORY_REGION(m, s);
#endif #endif
#ifdef MEMORY_SANITIZER #ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s); __msan_poison(m, s);
#endif #endif
(void)m; (void)m;
@ -220,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
} }
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s); ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif #endif
#ifdef MEMORY_SANITIZER #ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s); __msan_unpoison(m, s);
#endif #endif
(void)m; (void)m;
@ -351,6 +359,20 @@ struct map_slot_policy {
return slot->value; return slot->value;
} }
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
static K& mutable_key(slot_type* slot) {
// Still check for kMutableKeys so that we can avoid calling std::launder
// unless necessary because it can interfere with optimizations.
return kMutableKeys::value ? slot->key
: *std::launder(const_cast<K*>(
std::addressof(slot->value.first)));
}
#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
static const K& mutable_key(slot_type* slot) { return key(slot); }
#endif
static const K& key(const slot_type* slot) { static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first; return kMutableKeys::value ? slot->key : slot->value.first;
} }
@ -429,13 +451,6 @@ struct map_slot_policy {
std::move(src->value)); std::move(src->value));
} }
} }
template <class Allocator>
static void move(Allocator* alloc, slot_type* first, slot_type* last,
slot_type* result) {
for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
move(alloc, src, dest);
}
}; };
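mutable_key above relies on the C++17 std::launder idiom: a map slot stores std::pair<const K, V>, and when the slot's owner needs to mutate the key (for example while it lives in a node handle) it strips the const with const_cast and re-validates the pointer with std::launder. A minimal, self-contained sketch of that idiom under the same ownership assumption (not Abseil code):

#include <memory>
#include <new>
#include <string>
#include <utility>

using Slot = std::pair<const std::string, int>;

// Precondition: the caller owns the slot's storage and may legitimately treat
// the key as mutable (the situation a node handle is in).
std::string& MutableKey(Slot* slot) {
  return *std::launder(
      const_cast<std::string*>(std::addressof(slot->first)));
}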
} // namespace container_internal } // namespace container_internal

View file

@ -337,11 +337,11 @@ ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
enum Hash : size_t { enum Hash : size_t {
kStd = 0x2, // std::hash kStd = 0x1, // std::hash
#ifdef _MSC_VER #ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER #else // _MSC_VER
kExtension = 0x4, // ::hash (GCC extension) kExtension = 0x2, // ::hash (GCC extension)
#endif // _MSC_VER #endif // _MSC_VER
}; };

View file

@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
} // namespace } // namespace
std::mt19937_64* GetSharedRng() { std::mt19937_64* GetSharedRng() {
RandomDeviceSeedSeq seed_seq; static auto* rng = [] {
static auto* rng = new std::mt19937_64(seed_seq); RandomDeviceSeedSeq seed_seq;
return new std::mt19937_64(seed_seq);
}();
return rng; return rng;
} }
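The GetSharedRng change above moves the seed sequence inside an immediately-invoked lambda, so both the seeding and the engine construction happen exactly once, under the thread-safe initialization of the function-local static. A minimal sketch of the same pattern using std::seed_seq in place of the internal RandomDeviceSeedSeq:

#include <random>

std::mt19937_64* GetSharedEngine() {
  // The lambda runs once; later calls just return the cached pointer.
  static auto* engine = [] {
    std::random_device rd;
    std::seed_seq seq{rd(), rd(), rd(), rd()};
    return new std::mt19937_64(seq);
  }();
  return engine;
}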

View file

@ -17,6 +17,7 @@
#include <cstddef> #include <cstddef>
#include <memory> #include <memory>
#include <new>
#include <type_traits> #include <type_traits>
#include <utility> #include <utility>
@ -29,15 +30,34 @@ namespace container_internal {
// Defines how slots are initialized/destroyed/moved. // Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void> template <class Policy, class = void>
struct hash_policy_traits { struct hash_policy_traits {
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
private: private:
struct ReturnKey { struct ReturnKey {
// We return `Key` here. // When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Key,
absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int) {
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
}
#endif
template <class Key>
static Key Impl(Key&& k, char) {
return std::forward<Key>(k);
}
// When Key=T&, we forward the lvalue reference. // When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference. // When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map. // eg, for string_hash_map.
template <class Key, class... Args> template <class Key, class... Args>
Key operator()(Key&& k, const Args&...) const { auto operator()(Key&& k, const Args&...) const
return std::forward<Key>(k); -> decltype(Impl(std::forward<Key>(k), 0)) {
return Impl(std::forward<Key>(k), 0);
} }
}; };
@ -52,9 +72,6 @@ struct hash_policy_traits {
// The actual object stored in the hash table. // The actual object stored in the hash table.
using slot_type = typename Policy::slot_type; using slot_type = typename Policy::slot_type;
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
// The argument type for insertions into the hashtable. This is different // The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor // from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details. // and insert() member functions for more details.
@ -156,7 +173,7 @@ struct hash_policy_traits {
// Returns the "key" portion of the slot. // Returns the "key" portion of the slot.
// Used for node handle manipulation. // Used for node handle manipulation.
template <class P = Policy> template <class P = Policy>
static auto key(slot_type* slot) static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) { -> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot)); return P::apply(ReturnKey(), element(slot));
} }
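ReturnKey::Impl above uses the classic int/char tag-dispatch trick: the call site passes 0, which prefers the int overload whenever it is viable and silently falls back to the char overload otherwise. A minimal, unrelated sketch of that dispatch mechanism:

#include <iostream>
#include <type_traits>

template <class T,
          std::enable_if_t<std::is_integral<T>::value, int> = 0>
const char* Impl(T, int) { return "integral overload"; }

template <class T>
const char* Impl(T, char) { return "fallback overload"; }

template <class T>
const char* Dispatch(T v) { return Impl(v, 0); }  // 0 is an int: prefer the first overload

int main() {
  std::cout << Dispatch(42) << "\n";   // integral overload
  std::cout << Dispatch(3.5) << "\n";  // fallback overload
}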

View file

@ -67,6 +67,7 @@ void HashtablezInfo::PrepareForSampling() {
capacity.store(0, std::memory_order_relaxed); capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed);
num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed); max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed); total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed); hashes_bitwise_or.store(0, std::memory_order_relaxed);

View file

@ -73,6 +73,7 @@ struct HashtablezInfo {
std::atomic<size_t> capacity; std::atomic<size_t> capacity;
std::atomic<size_t> size; std::atomic<size_t> size;
std::atomic<size_t> num_erases; std::atomic<size_t> num_erases;
std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length; std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length; std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or; std::atomic<size_t> hashes_bitwise_or;
@ -105,6 +106,11 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
#endif #endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed); info->num_erases.store(0, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed);
} }
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@ -113,7 +119,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed); info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) { if (size == 0) {
// This is a clear, reset the total/num_erases too. // This is a clear, reset the total/num_erases too.
RecordRehashSlow(info, 0); info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
} }
} }
@ -122,12 +129,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) { inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed); info->size.fetch_sub(1, std::memory_order_relaxed);
info->num_erases.fetch_add(1, std::memory_order_relaxed); // There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_erases.store(
1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed);
} }
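RecordEraseSlow and RecordRehashSlow above replace fetch_add with a relaxed load followed by a relaxed store, which is valid because each counter has a single writer; readers may observe a slightly stale count but never a corrupted one. A minimal sketch of that single-writer counter:

#include <atomic>
#include <cstddef>

struct Stats {
  std::atomic<size_t> num_erases{0};
};

void RecordErase(Stats* s) {
  // Single writer: no other thread increments concurrently, so the atomic
  // read-modify-write of fetch_add is unnecessary.
  s->num_erases.store(1 + s->num_erases.load(std::memory_order_relaxed),
                      std::memory_order_relaxed);
}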
HashtablezInfo* SampleSlow(int64_t* next_sample); HashtablezInfo* SampleSlow(int64_t* next_sample);
void UnsampleSlow(HashtablezInfo* info); void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle { class HashtablezInfoHandle {
public: public:
explicit HashtablezInfoHandle() : info_(nullptr) {} explicit HashtablezInfoHandle() : info_(nullptr) {}
@ -179,14 +195,27 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer; friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_; HashtablezInfo* info_;
}; };
#else
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
// be removed by the linker, in order to reduce the binary size.
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t) {}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
inline void RecordErase() {}
friend inline void swap(HashtablezInfoHandle& /*lhs*/,
HashtablezInfoHandle& /*rhs*/) {}
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample; extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
#endif // ABSL_PER_THREAD_TLS #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistation // Returns an RAII sampling handle that manages registration and unregistation
// with the global sampler. // with the global sampler.
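The #else branch above keeps the HashtablezInfoHandle API available while compiling every member down to an empty inline function, so call sites need no #ifdefs and the unused sampling machinery can be dropped by the linker. A minimal sketch of that compile-time stub pattern, with a hypothetical MYLIB_ENABLE_PROFILING flag:

#include <cstddef>

#if defined(MYLIB_ENABLE_PROFILING)
class ProfileHandle {
 public:
  void RecordInsert(size_t hash) { (void)hash; /* real bookkeeping would live here */ }
  void RecordErase() { /* real bookkeeping would live here */ }
};
#else
// Same interface, but every member is a trivially inlinable no-op.
class ProfileHandle {
 public:
  void RecordInsert(size_t /*hash*/) {}
  void RecordErase() {}
};
#endif

void Use(ProfileHandle& h) {
  h.RecordInsert(42);  // compiles identically whether profiling is on or off
  h.RecordErase();
}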

View file

@ -38,6 +38,7 @@ constexpr int kProbeLength = 8;
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace container_internal { namespace container_internal {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer { class HashtablezInfoHandlePeer {
public: public:
static bool IsSampled(const HashtablezInfoHandle& h) { static bool IsSampled(const HashtablezInfoHandle& h) {
@ -46,6 +47,13 @@ class HashtablezInfoHandlePeer {
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
}; };
#else
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle&) { return false; }
static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
namespace { namespace {
using ::absl::synchronization_internal::ThreadPool; using ::absl::synchronization_internal::ThreadPool;
@ -76,6 +84,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@ -95,6 +104,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@ -167,9 +177,10 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.size.load(), 2); EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3); EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
} }
#if defined(ABSL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) { TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezEnabled(true); SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100); SetHashtablezSampleParameter(100);
@ -213,7 +224,6 @@ TEST(HashtablezSamplerTest, Sample) {
} }
EXPECT_NEAR(sample_rate, 0.01, 0.005); EXPECT_NEAR(sample_rate, 0.01, 0.005);
} }
#endif
TEST(HashtablezSamplerTest, Handle) { TEST(HashtablezSamplerTest, Handle) {
auto& sampler = HashtablezSampler::Global(); auto& sampler = HashtablezSampler::Global();
@ -243,6 +253,8 @@ TEST(HashtablezSamplerTest, Handle) {
}); });
EXPECT_FALSE(found); EXPECT_FALSE(found);
} }
#endif
TEST(HashtablezSamplerTest, Registration) { TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler; HashtablezSampler sampler;

View file

@ -462,6 +462,9 @@ class Storage {
Inlined inlined; Inlined inlined;
}; };
template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
Metadata metadata_; Metadata metadata_;
Data data_; Data data_;
}; };
@ -542,48 +545,42 @@ template <typename T, size_t N, typename A>
template <typename ValueAdapter> template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void { auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
StorageView storage_view = MakeStorageView(); StorageView storage_view = MakeStorageView();
auto* const base = storage_view.data;
IteratorValueAdapter<MoveIterator> move_values( const size_type size = storage_view.size;
MoveIterator(storage_view.data)); auto* alloc = GetAllocPtr();
if (new_size <= size) {
AllocationTransaction allocation_tx(GetAllocPtr()); // Destroy extra old elements.
ConstructionTransaction construction_tx(GetAllocPtr()); inlined_vector_internal::DestroyElements(alloc, base + new_size,
size - new_size);
absl::Span<value_type> construct_loop; } else if (new_size <= storage_view.capacity) {
absl::Span<value_type> move_construct_loop; // Construct new elements in place.
absl::Span<value_type> destroy_loop; inlined_vector_internal::ConstructElements(alloc, base + size, &values,
new_size - size);
if (new_size > storage_view.capacity) { } else {
// Steps:
// a. Allocate new backing store.
// b. Construct new elements in new backing store.
// c. Move existing elements from old backing store to now.
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
AllocationTransaction allocation_tx(alloc);
size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
pointer new_data = allocation_tx.Allocate(new_capacity); pointer new_data = allocation_tx.Allocate(new_capacity);
construct_loop = {new_data + storage_view.size,
new_size - storage_view.size};
move_construct_loop = {new_data, storage_view.size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
construct_loop = {storage_view.data + storage_view.size,
new_size - storage_view.size};
} else {
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
construction_tx.Construct(construct_loop.data(), &values, ConstructionTransaction construction_tx(alloc);
construct_loop.size()); construction_tx.Construct(new_data + size, &values, new_size - size);
inlined_vector_internal::ConstructElements( IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
GetAllocPtr(), move_construct_loop.data(), &move_values, inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
move_construct_loop.size()); size);
inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), inlined_vector_internal::DestroyElements(alloc, base, size);
destroy_loop.size()); construction_tx.Commit();
construction_tx.Commit();
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated(); DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx); AcquireAllocatedData(&allocation_tx);
SetIsAllocated(); SetIsAllocated();
} }
SetSize(new_size); SetSize(new_size);
} }
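The reallocating branch of Resize above follows the commented steps: allocate a new backing store, construct the appended elements, move the existing ones, then destroy and free the old store, with transactional wrappers so a throwing constructor rolls everything back. A simplified sketch of that exception-safe grow path (new_size greater than the current size), using std::allocator_traits and an explicit catch block in place of the transaction helpers (not the Abseil implementation):

#include <algorithm>
#include <cstddef>
#include <memory>
#include <utility>

template <typename T, typename A>
void GrowAndDefaultAppend(A& alloc, T*& data, size_t& size, size_t& capacity,
                          size_t new_size) {
  using AT = std::allocator_traits<A>;
  // a. Allocate the new backing store.
  const size_t new_capacity = std::max(new_size, capacity * 2);
  T* new_data = AT::allocate(alloc, new_capacity);
  size_t tail_constructed = 0, moved = 0;
  try {
    // b. Construct the new tail elements directly in the new backing store.
    for (; tail_constructed < new_size - size; ++tail_constructed)
      AT::construct(alloc, new_data + size + tail_constructed);
    // c. Move the existing elements across.
    for (; moved < size; ++moved)
      AT::construct(alloc, new_data + moved, std::move(data[moved]));
  } catch (...) {
    // Roll back: destroy whatever was built, release the new allocation.
    for (size_t i = 0; i < moved; ++i) AT::destroy(alloc, new_data + i);
    for (size_t i = 0; i < tail_constructed; ++i)
      AT::destroy(alloc, new_data + size + i);
    AT::deallocate(alloc, new_data, new_capacity);
    throw;
  }
  // d. Commit: tear down the old backing store and adopt the new one.
  for (size_t i = 0; i < size; ++i) AT::destroy(alloc, data + i);
  if (data != nullptr) AT::deallocate(alloc, data, capacity);
  data = new_data;
  size = new_size;
  capacity = new_capacity;
}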
@ -684,44 +681,50 @@ template <typename T, size_t N, typename A>
template <typename... Args> template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference { auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
StorageView storage_view = MakeStorageView(); StorageView storage_view = MakeStorageView();
const auto n = storage_view.size;
if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
// Fast path; new element fits.
pointer last_ptr = storage_view.data + n;
AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
std::forward<Args>(args)...);
AddSize(1);
return *last_ptr;
}
// TODO(b/173712035): Annotate with musttail attribute to prevent regression.
return EmplaceBackSlow(std::forward<Args>(args)...);
}
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
StorageView storage_view = MakeStorageView();
AllocationTransaction allocation_tx(GetAllocPtr()); AllocationTransaction allocation_tx(GetAllocPtr());
IteratorValueAdapter<MoveIterator> move_values( IteratorValueAdapter<MoveIterator> move_values(
MoveIterator(storage_view.data)); MoveIterator(storage_view.data));
size_type new_capacity = NextCapacity(storage_view.capacity);
pointer construct_data; pointer construct_data = allocation_tx.Allocate(new_capacity);
if (storage_view.size == storage_view.capacity) {
size_type new_capacity = NextCapacity(storage_view.capacity);
construct_data = allocation_tx.Allocate(new_capacity);
} else {
construct_data = storage_view.data;
}
pointer last_ptr = construct_data + storage_view.size; pointer last_ptr = construct_data + storage_view.size;
// Construct new element.
AllocatorTraits::construct(*GetAllocPtr(), last_ptr, AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
std::forward<Args>(args)...); std::forward<Args>(args)...);
// Move elements from old backing store to new backing store.
if (allocation_tx.DidAllocate()) { ABSL_INTERNAL_TRY {
ABSL_INTERNAL_TRY { inlined_vector_internal::ConstructElements(
inlined_vector_internal::ConstructElements( GetAllocPtr(), allocation_tx.GetData(), &move_values,
GetAllocPtr(), allocation_tx.GetData(), &move_values, storage_view.size);
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx);
SetIsAllocated();
} }
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx);
SetIsAllocated();
AddSize(1); AddSize(1);
return *last_ptr; return *last_ptr;
} }
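EmplaceBack above now splits a small, branch-predicted fast path (capacity still available) from an ABSL_ATTRIBUTE_NOINLINE EmplaceBackSlow that reallocates, so the common case stays cheap to inline at every call site. A rough sketch of that hot/cold split on top of std::vector; the noinline attribute is written in GCC/Clang syntax and the growth policy is only illustrative:

#include <cstddef>
#include <utility>
#include <vector>

template <typename T>
class TinyVec {
 public:
  template <typename... Args>
  T& EmplaceBack(Args&&... args) {
    if (v_.size() < v_.capacity()) {  // fast path: no reallocation needed
      v_.emplace_back(std::forward<Args>(args)...);
      return v_.back();
    }
    return EmplaceBackSlow(std::forward<Args>(args)...);  // rare, cold path
  }

 private:
  template <typename... Args>
  __attribute__((noinline)) T& EmplaceBackSlow(Args&&... args) {
    v_.reserve(v_.capacity() == 0 ? 1 : v_.capacity() * 2);  // grow, then insert
    v_.emplace_back(std::forward<Args>(args)...);
    return v_.back();
  }

  std::vector<T> v_;
};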

View file

@ -163,6 +163,7 @@
#include <assert.h> #include <assert.h>
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include <ostream> #include <ostream>
#include <string> #include <string>
#include <tuple> #include <tuple>
@ -170,15 +171,16 @@
#include <typeinfo> #include <typeinfo>
#include <utility> #include <utility>
#ifdef ADDRESS_SANITIZER #include "absl/base/config.h"
#include <sanitizer/asan_interface.h>
#endif
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h" #include "absl/strings/str_cat.h"
#include "absl/types/span.h" #include "absl/types/span.h"
#include "absl/utility/utility.h" #include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#if defined(__GXX_RTTI) #if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE #define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif #endif
@ -614,7 +616,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
void PoisonPadding(const Char* p) const { void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds"); static_assert(N < NumOffsets, "Index out of bounds");
(void)p; (void)p;
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p); PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour. // The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) { if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {

View file

@ -17,6 +17,7 @@
// We need ::max_align_t because some libstdc++ versions don't provide // We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t // std::max_align_t
#include <stddef.h> #include <stddef.h>
#include <cstdint> #include <cstdint>
#include <memory> #include <memory>
#include <sstream> #include <sstream>
@ -24,6 +25,7 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h" #include "absl/types/span.h"
@ -126,8 +128,10 @@ TEST(Layout, ElementTypes) {
{ {
using L = Layout<int32_t, int32_t>; using L = Layout<int32_t, int32_t>;
SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>(); SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>(); SameType<std::tuple<int32_t, int32_t>,
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>(); decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>,
decltype(L::Partial(0))::ElementTypes>();
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
@ -366,18 +370,21 @@ TEST(Layout, PointerByIndex) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
} }
{ {
using L = Layout<int32_t, int32_t>; using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p)))); Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, EXPECT_EQ(12,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p)))); Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(
12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p)))); EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
} }
@ -385,39 +392,44 @@ TEST(Layout, PointerByIndex) {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p)))); EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p)))); EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p)))); EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p)))); Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p)))); 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p)))); Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p)))); Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(4, EXPECT_EQ(
Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p)))); 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8, EXPECT_EQ(8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p)))); Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p)))); Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(8, EXPECT_EQ(
Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p)))); 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p)))); Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p)))); 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p)))); 0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p)))); 0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p)))); 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ( EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p)))); 4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ( EXPECT_EQ(
8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p)))); 8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ( EXPECT_EQ(
@ -426,7 +438,8 @@ TEST(Layout, PointerByIndex) {
24, 24,
Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p)))); Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ( EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p)))); 8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@ -437,75 +450,78 @@ TEST(Layout, PointerByType) {
alignas(max_align_t) const unsigned char p[100] = {}; alignas(max_align_t) const unsigned char p[100] = {};
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p)))); 0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p)))); 0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p)))); 0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p)))); 0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p)))); Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p)))); 0,
EXPECT_EQ( Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p)))); EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
8, 8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p)))); Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p)))); 0,
EXPECT_EQ( Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p)))); EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p)))); Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
0, L::Partial(0, 0, 0).Pointer<int8_t>(p))));
Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
EXPECT_EQ( L::Partial(0, 0, 0).Pointer<int32_t>(p))));
0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const Int128*>( EXPECT_EQ(0, Distance(p, Type<const Int128*>(
L::Partial(0, 0, 0).Pointer<Int128>(p)))); L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
0, L::Partial(1, 0, 0).Pointer<int8_t>(p))));
Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p)))); EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
EXPECT_EQ( L::Partial(1, 0, 0).Pointer<int32_t>(p))));
4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const Int128*>( EXPECT_EQ(8, Distance(p, Type<const Int128*>(
L::Partial(1, 0, 0).Pointer<Int128>(p)))); L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
0, L::Partial(5, 3, 1).Pointer<int8_t>(p))));
Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>( EXPECT_EQ(24, Distance(p, Type<const Int128*>(
L::Partial(5, 3, 1).Pointer<Int128>(p)))); L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ( EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
8, L::Partial(5, 3, 1).Pointer<int32_t>(p))));
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(24, EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p)))); Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p)))); EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
} }
} }
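The distances expected above (0, 4, 8 and 24) all follow from one packing rule: each array in a Layout starts where the previous one ends, rounded up to its element type's alignment. Below is a small self-contained sketch of that arithmetic for Layout<int8_t, int32_t, Int128>(5, 3, 1), assuming the test's Int128 type is 8-byte aligned (which the expected offset of 24 implies); Align and the kOffset constants are illustrative names, not part of the library.

    #include <cstddef>

    // Round n up to the next multiple of a (a must be a power of two).
    constexpr size_t Align(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

    // Layout<int8_t, int32_t, Int128> with array sizes (5, 3, 1):
    constexpr size_t kInt8Offset = 0;                                 // 5 x int8_t
    constexpr size_t kInt32Offset = Align(kInt8Offset + 5 * 1, 4);    // == 8
    constexpr size_t kInt128Offset = Align(kInt32Offset + 3 * 4, 8);  // == 24

    static_assert(kInt32Offset == 8 && kInt128Offset == 24,
                  "matches the Pointer<1>/Pointer<2> distances checked above");

For Partial(1, 0) the int32_t array lands at Align(1, 4) == 4, which is where the expected distance of 4 comes from.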
@ -546,15 +562,18 @@ TEST(Layout, MutablePointerByIndex) {
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p)))); EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p)))); EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p)))); EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p)))); Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@ -566,48 +585,61 @@ TEST(Layout, MutablePointerByType) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p)))); EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p)))); EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p)))); EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p)))); EXPECT_EQ(8,
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p)))); Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p)))); EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, EXPECT_EQ(0,
Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p)))); Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p)))); EXPECT_EQ(0,
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p)))); Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, EXPECT_EQ(8,
Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p)))); Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p)))); EXPECT_EQ(0,
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p)))); Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(24, EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p)))); Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p)))); 0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p)))); 0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p)))); 0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p)))); 0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, EXPECT_EQ(
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p)))); 4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ( EXPECT_EQ(
8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p)))); 8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p)))); 0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ( EXPECT_EQ(
24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p)))); 24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, EXPECT_EQ(
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p)))); 8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p)))); EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p)))); EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p)))); EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@ -788,67 +820,72 @@ TEST(Layout, SliceByIndexData) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data())); p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data())); p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data())); EXPECT_EQ(0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
} }
{ {
using L = Layout<int32_t, int32_t>; using L = Layout<int32_t, int32_t>;
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data())); p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Distance(
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data())); p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
12, 12,
Distance(p, Distance(
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data())); p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data())); 0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(12, EXPECT_EQ(
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data())); 12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data())); p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance( 0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data())); p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance( 0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data())); p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data())); p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
@ -862,7 +899,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data())); p,
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
4, 4,
Distance( Distance(
@ -876,7 +914,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data())); p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance( Distance(
@ -888,12 +927,14 @@ TEST(Layout, SliceByIndexData) {
p, p,
Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data())); Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data())); 0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data())); Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data())); 8,
Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
} }
} }
@ -904,98 +945,94 @@ TEST(Layout, SliceByTypeData) {
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data())); p,
Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data())); p,
Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data())); 0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data())); p,
Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data())); Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data())); Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)) Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
.data())); .data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
L::Partial(1, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
L::Partial(5, 3).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(0, 0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>( EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
L::Partial(0, 0, 0).Slice<Int128>(p)) L::Partial(0, 0, 0).Slice<Int128>(p))
.data())); .data()));
EXPECT_EQ( EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
0, L::Partial(1, 0, 0).Slice<int8_t>(p))
Distance( .data()));
p, EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data())); L::Partial(1, 0, 0).Slice<int32_t>(p))
EXPECT_EQ( .data()));
4,
Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>( EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
L::Partial(1, 0, 0).Slice<Int128>(p)) L::Partial(1, 0, 0).Slice<Int128>(p))
.data())); .data()));
EXPECT_EQ( EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
0, L::Partial(5, 3, 1).Slice<int8_t>(p))
Distance( .data()));
p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>( EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
L::Partial(5, 3, 1).Slice<Int128>(p)) L::Partial(5, 3, 1).Slice<Int128>(p))
.data())); .data()));
EXPECT_EQ( EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
8, L::Partial(5, 3, 1).Slice<int32_t>(p))
Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)) .data()));
.data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data())); Distance(p,
Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance(p, Distance(p,
Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data())); Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, Distance( 8,
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data())); Distance(
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
} }
} }
@ -1003,18 +1040,19 @@ TEST(Layout, MutableSliceByIndexData) {
alignas(max_align_t) unsigned char p[100]; alignas(max_align_t) unsigned char p[100];
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data())); 0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0, EXPECT_EQ(
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data())); 0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data())); EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
} }
{ {
using L = Layout<int32_t, int32_t>; using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data())); 0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
12, 12,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data())); Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@ -1023,55 +1061,63 @@ TEST(Layout, MutableSliceByIndexData) {
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data())); 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data())); 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data())); 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data())); Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data())); Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance( 0, Distance(
p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data())); p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data())); p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
4, 4, Distance(
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data())); p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, Distance( 8, Distance(
p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data())); p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data())); p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, Distance( 24, Distance(
p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data())); p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, 8, Distance(
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data())); p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(24, EXPECT_EQ(24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data())); Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data())); EXPECT_EQ(8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
} }
} }
@ -1080,66 +1126,84 @@ TEST(Layout, MutableSliceByTypeData) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data())); p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0, Distance(
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data())); p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data())); EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
} }
{ {
using L = Layout<int8_t, int32_t, Int128>; using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ( EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data())); 0,
EXPECT_EQ( Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data())); Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data())); Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4, Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data())); Distance(p,
EXPECT_EQ( Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
8, Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data())); p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, 0,
Distance( Distance(
p, p,
Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data())); Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance( 0,
p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data())); Distance(
p,
Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
4, 4,
Distance( Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data())); p,
Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, 8,
Distance( Distance(
p, p,
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data())); Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
0, Distance( 0,
p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data())); Distance(
p,
Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance( Distance(
@ -1148,14 +1212,16 @@ TEST(Layout, MutableSliceByTypeData) {
EXPECT_EQ( EXPECT_EQ(
8, 8,
Distance( Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data())); p,
EXPECT_EQ(0, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data())); EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
24, 24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data())); Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ( EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data())); 8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
} }
} }
@ -1254,17 +1320,17 @@ TEST(Layout, MutableSlices) {
} }
{ {
const auto x = L::Partial(1, 2, 3); const auto x = L::Partial(1, 2, 3);
EXPECT_THAT( EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))), x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p)))); IsSameSlice(x.Slice<2>(p))));
} }
{ {
const L x(1, 2, 3); const L x(1, 2, 3);
EXPECT_THAT( EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))), x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p)))); IsSameSlice(x.Slice<2>(p))));
} }
} }
@ -1314,7 +1380,7 @@ struct Region {
}; };
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) { for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
} }
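The guard above now tests Abseil's own feature macro, ABSL_HAVE_ADDRESS_SANITIZER, instead of the older ADDRESS_SANITIZER define, and the loop asks ASan byte by byte whether an address is poisoned. For readers unfamiliar with the mechanism, here is a minimal standalone sketch of manual poisoning; it is only meaningful when the program is built with -fsanitize=address, the macros come from <sanitizer/asan_interface.h>, and PoisonDemo is an illustrative name.

    #include <sanitizer/asan_interface.h>

    void PoisonDemo() {
      static unsigned char buf[64];
      // Mark the first 16 bytes as off-limits; ASan reports any access to them,
      // and __asan_address_is_poisoned(buf) returns 1 while they stay poisoned.
      ASAN_POISON_MEMORY_REGION(buf, 16);
      // Make the region usable again before normal code touches it.
      ASAN_UNPOISON_MEMORY_REGION(buf, 16);
    }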
@ -1396,7 +1462,8 @@ TEST(Layout, DebugString) {
x.DebugString()); x.DebugString());
} }
{ {
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3); constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
EXPECT_EQ( EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; " "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" + "@16" +
@ -1404,7 +1471,8 @@ TEST(Layout, DebugString) {
x.DebugString()); x.DebugString());
} }
{ {
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4); constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
EXPECT_EQ( EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; " "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" + "@16" +

View file

@@ -27,7 +27,7 @@ constexpr size_t Group::kWidth;
 // Returns "random" seed.
 inline size_t RandomSeed() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
   static thread_local size_t counter = 0;
   size_t value = ++counter;
 #else  // ABSL_HAVE_THREAD_LOCAL
@@ -43,6 +43,19 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
   return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
 }

+void ConvertDeletedToEmptyAndFullToDeleted(
+    ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
 } // namespace container_internal
 ABSL_NAMESPACE_END
 } // namespace absl
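The function that moved into this .cc file rewrites the control-byte array in place so that a rehash-in-place pass can tell which slots still hold elements needing reinsertion (previously FULL, now DELETED) and which are simply free (previously DELETED or EMPTY, now EMPTY). As a rough mental model, ignoring the SIMD Group operations and the cloned control bytes at the end of the table, the remapping looks like the sketch below; the constants and ConvertSketch are illustrative stand-ins, not the library's own definitions.

    #include <cstdint>
    #include <vector>

    // Illustrative stand-ins for the ctrl-byte states; any non-negative byte
    // models a FULL slot carrying a 7-bit hash fragment.
    constexpr int8_t kEmptyByte = -128;
    constexpr int8_t kDeletedByte = -2;
    constexpr int8_t kSentinelByte = -1;

    // DELETED -> EMPTY, EMPTY -> EMPTY, FULL -> DELETED; the sentinel is kept.
    void ConvertSketch(std::vector<int8_t>& ctrl) {
      for (int8_t& c : ctrl) {
        if (c == kSentinelByte) continue;
        c = (c >= 0) ? kDeletedByte : kEmptyByte;
      }
    }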

View file

@ -122,6 +122,16 @@ namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace container_internal { namespace container_internal {
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
std::true_type /* propagate_on_container_swap */) {
using std::swap;
swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
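The two SwapAlloc overloads form a tag-dispatch pair: the swap() member further down passes typename AllocTraits::propagate_on_container_swap{}, and overload resolution picks either the real swap or the no-op. A hedged, self-contained sketch of the same pattern against std::allocator_traits; SwapAllocSketch and SwapContainerAllocators are illustrative names, not Abseil functions.

    #include <memory>
    #include <utility>

    template <typename AllocType>
    void SwapAllocSketch(AllocType& lhs, AllocType& rhs,
                         std::true_type /* propagate_on_container_swap */) {
      using std::swap;
      swap(lhs, rhs);
    }
    template <typename AllocType>
    void SwapAllocSketch(AllocType&, AllocType&,
                         std::false_type /* propagate_on_container_swap */) {}

    template <typename Alloc>
    void SwapContainerAllocators(Alloc& a, Alloc& b) {
      using Traits = std::allocator_traits<Alloc>;
      // std::allocator_traits defaults this trait to std::false_type, so
      // allocators that say nothing about swap propagation stay where they are.
      SwapAllocSketch(a, b, typename Traits::propagate_on_container_swap{});
    }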
template <size_t Width> template <size_t Width>
class probe_seq { class probe_seq {
public: public:
@ -169,10 +179,14 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T> template <class T>
constexpr bool IsNoThrowSwappable() { constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap; using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>())); return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
} }
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
return false;
}
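Same idea here: the defaulted std::true_type parameter keeps existing calls such as IsNoThrowSwappable<hasher>() compiling unchanged, while the new std::false_type overload lets the swap() noexcept specification answer false outright for non-propagating allocators instead of forming a swap expression for them (which, for an allocator like the PAlloc added in the allocator test below, with its deleted assignment, would not even be valid). A quick illustrative check, with the functions renamed to make clear this is a sketch:

    #include <type_traits>
    #include <utility>

    template <class T>
    constexpr bool IsNoThrowSwappableSketch(std::true_type = {} /* is_swappable */) {
      using std::swap;
      return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
    }
    template <class T>
    constexpr bool IsNoThrowSwappableSketch(std::false_type /* is_swappable */) {
      return false;
    }

    static_assert(IsNoThrowSwappableSketch<int>(),
                  "swapping two ints cannot throw");
    static_assert(!IsNoThrowSwappableSketch<int>(std::false_type{}),
                  "the false_type overload short-circuits to false");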
template <typename T> template <typename T>
int TrailingZeros(T x) { int TrailingZeros(T x) {
@ -458,17 +472,7 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// DELETED -> EMPTY // DELETED -> EMPTY
// EMPTY -> EMPTY // EMPTY -> EMPTY
// FULL -> DELETED // FULL -> DELETED
inline void ConvertDeletedToEmptyAndFullToDeleted( void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
ctrl[capacity] = kSentinel;
}
// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) { inline size_t NormalizeCapacity(size_t n) {
@ -497,6 +501,76 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7); return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
} }
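For the formula in the context line above, a quick numeric check may help: the table targets a maximum load factor of 7/8, so holding `growth` elements needs a capacity of roughly 8/7 of that, which is what growth + (growth - 1) / 7 computes. The renamed copy below only restates the shown one-liner so the values can be verified with static_assert.

    #include <cstddef>
    #include <cstdint>

    constexpr size_t GrowthToLowerboundCapacitySketch(size_t growth) {
      return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
    }

    static_assert(GrowthToLowerboundCapacitySketch(7) == 7, "7 still fits in 7");
    static_assert(GrowthToLowerboundCapacitySketch(8) == 9, "8 needs at least 9");
    static_assert(GrowthToLowerboundCapacitySketch(100) == 114, "about 8/7 of 100");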
inline void AssertIsFull(ctrl_t* ctrl) {
ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
"Invalid operation on iterator. The element might have "
"been erased, or the table might have rehashed.");
}
inline void AssertIsValid(ctrl_t* ctrl) {
ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
"Invalid operation on iterator. The element might have "
"been erased, or the table might have rehashed.");
}
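These free functions take over from the per-iterator assert_is_full()/assert_is_valid() members removed further down, so erase() and extract() can share the same check. ABSL_HARDENING_ASSERT fires in debug builds and, as far as I can tell, also in release builds compiled with Abseil's hardened option; a hypothetical use of the same pattern (AssertNonNullArgument is not an Abseil function):

    #include "absl/base/macros.h"

    // Hypothetical helper following the pattern above; the string documents the
    // precondition so the failure message explains itself.
    inline void AssertNonNullArgument(const void* p) {
      ABSL_HARDENING_ASSERT(p != nullptr &&
                            "null pointer passed where an object was required");
    }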
struct FindInfo {
size_t offset;
size_t probe_length;
};
// The representation of the object has two modes:
// - small: For capacities < kWidth-1
// - large: For the rest.
//
// Differences:
// - In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy kEmpty values that do not
// represent a real slot. This is important to take into account on
// find_first_non_full(), where we never try ShouldInsertBackwards() for
// small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
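Concretely, with the 16-byte SSE2 Group used on most x86 builds, "small" means capacities 1 through 14; with the portable 8-byte Group the threshold is 7. A tiny check under the assumed width of 16 (the constant below is illustrative, not the real Group::kWidth):

    #include <cstddef>

    // 16 for the SSE2 Group, 8 for the portable fallback; 16 is assumed here.
    constexpr size_t kWidthSketch = 16;
    constexpr bool IsSmallSketch(size_t capacity) {
      return capacity < kWidthSketch - 1;
    }

    static_assert(IsSmallSketch(1) && IsSmallSketch(14), "small mode");
    static_assert(!IsSmallSketch(15), "15 is the first large-mode capacity");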
inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
// Probes the raw_hash_set with the probe sequence for hash and returns the
// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both kEmpty and kDelete
// in one group. Such tables appear during drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
Group g{ctrl + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
assert(seq.index() < capacity && "full table!");
}
}
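probe() and find_first_non_full() used to be raw_hash_set members (their old definitions are removed further down); as free functions they take ctrl, hash and capacity explicitly. The contract is: walk the probe sequence for hash and report the offset of the first empty or deleted slot plus how far the probe travelled. Below is a deliberately simplified model, using linear probing over single control bytes rather than the real group-at-a-time probing, just to show the shape of the result; all names here are sketch-only.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr int8_t kEmptySlot = -128;
    constexpr int8_t kDeletedSlot = -2;

    struct FindInfoSketch {
      size_t offset;
      size_t probe_length;
    };

    // Simplified: scans byte by byte with linear probing; the real code looks at
    // Group::kWidth control bytes at a time and advances group-wise.
    FindInfoSketch FindFirstNonFullSketch(const std::vector<int8_t>& ctrl,
                                          size_t hash) {
      assert(!ctrl.empty());
      const size_t n = ctrl.size();
      for (size_t i = 0; i != n; ++i) {
        const size_t pos = (hash + i) % n;
        if (ctrl[pos] == kEmptySlot || ctrl[pos] == kDeletedSlot) {
          return {pos, /*probe_length=*/i};
        }
      }
      assert(false && "full table!");
      return {0, n};
    }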
// Policy: a policy defines how to perform different operations on // Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface // the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy). // of policy).
@ -511,7 +585,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
// if they are equal, false if they are not. If two keys compare equal, then // if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal. // their hash values as defined by Hash MUST be equal.
// //
// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which // Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be // the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed. // constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc> template <class Policy, class Hash, class Eq, class Alloc>
@ -617,7 +692,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator. // PRECONDITION: not an end() iterator.
reference operator*() const { reference operator*() const {
assert_is_full(); AssertIsFull(ctrl_);
return PolicyTraits::element(slot_); return PolicyTraits::element(slot_);
} }
@ -626,7 +701,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator. // PRECONDITION: not an end() iterator.
iterator& operator++() { iterator& operator++() {
assert_is_full(); AssertIsFull(ctrl_);
++ctrl_; ++ctrl_;
++slot_; ++slot_;
skip_empty_or_deleted(); skip_empty_or_deleted();
@ -640,8 +715,8 @@ class raw_hash_set {
} }
friend bool operator==(const iterator& a, const iterator& b) { friend bool operator==(const iterator& a, const iterator& b) {
a.assert_is_valid(); AssertIsValid(a.ctrl_);
b.assert_is_valid(); AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_; return a.ctrl_ == b.ctrl_;
} }
friend bool operator!=(const iterator& a, const iterator& b) { friend bool operator!=(const iterator& a, const iterator& b) {
@ -655,13 +730,6 @@ class raw_hash_set {
ABSL_INTERNAL_ASSUME(ctrl != nullptr); ABSL_INTERNAL_ASSUME(ctrl != nullptr);
} }
void assert_is_full() const {
ABSL_HARDENING_ASSERT(ctrl_ != nullptr && IsFull(*ctrl_));
}
void assert_is_valid() const {
ABSL_HARDENING_ASSERT(ctrl_ == nullptr || IsFull(*ctrl_));
}
void skip_empty_or_deleted() { void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) { while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@ -730,7 +798,6 @@ class raw_hash_set {
: ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
if (bucket_count) { if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count); capacity_ = NormalizeCapacity(bucket_count);
reset_growth_left();
initialize_slots(); initialize_slots();
} }
} }
@ -836,7 +903,7 @@ class raw_hash_set {
// than a full `insert`. // than a full `insert`.
for (const auto& v : that) { for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
auto target = find_first_non_full(hash); auto target = find_first_non_full(ctrl_, hash, capacity_);
set_ctrl(target.offset, H2(hash)); set_ctrl(target.offset, H2(hash));
emplace_at(target.offset, v); emplace_at(target.offset, v);
infoz_.RecordInsert(hash, target.probe_length); infoz_.RecordInsert(hash, target.probe_length);
@ -1045,7 +1112,9 @@ class raw_hash_set {
} }
iterator insert(const_iterator, node_type&& node) { iterator insert(const_iterator, node_type&& node) {
return insert(std::move(node)).first; auto res = insert(std::move(node));
node = std::move(res.node);
return res.position;
} }
// This overload kicks in if we can deduce the key from args. This enables us // This overload kicks in if we can deduce the key from args. This enables us
@ -1174,7 +1243,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be // This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument. // a better match if non-const iterator is passed as an argument.
void erase(iterator it) { void erase(iterator it) {
it.assert_is_full(); AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_); PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it); erase_meta_only(it);
} }
@ -1208,7 +1277,7 @@ class raw_hash_set {
} }
node_type extract(const_iterator position) { node_type extract(const_iterator position) {
position.inner_.assert_is_full(); AssertIsFull(position.inner_.ctrl_);
auto node = auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_); CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position); erase_meta_only(position);
@ -1225,8 +1294,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept( void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() && IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
(!AllocTraits::propagate_on_container_swap::value || IsNoThrowSwappable<allocator_type>(
IsNoThrowSwappable<allocator_type>())) { typename AllocTraits::propagate_on_container_swap{})) {
using std::swap; using std::swap;
swap(ctrl_, that.ctrl_); swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_); swap(slots_, that.slots_);
@ -1236,12 +1305,8 @@ class raw_hash_set {
swap(hash_ref(), that.hash_ref()); swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref()); swap(eq_ref(), that.eq_ref());
swap(infoz_, that.infoz_); swap(infoz_, that.infoz_);
if (AllocTraits::propagate_on_container_swap::value) { SwapAlloc(alloc_ref(), that.alloc_ref(),
swap(alloc_ref(), that.alloc_ref()); typename AllocTraits::propagate_on_container_swap{});
} else {
// If the allocators do not compare equal it is officially undefined
// behavior. We choose to do nothing.
}
} }
void rehash(size_t n) { void rehash(size_t n) {
@ -1260,7 +1325,12 @@ class raw_hash_set {
} }
} }
void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } void reserve(size_t n) {
size_t m = GrowthToLowerboundCapacity(n);
if (m > capacity_) {
resize(NormalizeCapacity(m));
}
}
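A user-visible consequence worth calling out: reserve(n) now only ever grows the table, whereas the old forwarding to rehash(GrowthToLowerboundCapacity(n)) could shrink a table that was already larger than needed. A hedged usage sketch with flat_hash_set, inferred from this diff rather than from separate documentation:

    #include <cassert>
    #include <cstddef>

    #include "absl/container/flat_hash_set.h"

    int main() {
      absl::flat_hash_set<int> s;
      s.reserve(100);  // grows capacity so about 100 elements fit without rehashing
      const size_t buckets_after_reserve = s.bucket_count();

      s.reserve(10);   // smaller request: with the new code this is a no-op
      assert(s.bucket_count() == buckets_after_reserve);
      return 0;
    }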
// Extension API: support for heterogeneous keys. // Extension API: support for heterogeneous keys.
// //
@ -1285,7 +1355,7 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const { void prefetch(const key_arg<K>& key) const {
(void)key; (void)key;
#if defined(__GNUC__) #if defined(__GNUC__)
auto seq = probe(hash_ref()(key)); auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset())); __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset())); __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__ #endif // __GNUC__
@ -1300,7 +1370,7 @@ class raw_hash_set {
// called heterogeneous key support. // called heterogeneous key support.
template <class K = key_type> template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) { iterator find(const key_arg<K>& key, size_t hash) {
auto seq = probe(hash); auto seq = probe(ctrl_, hash, capacity_);
while (true) { while (true) {
Group g{ctrl_ + seq.offset()}; Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) { for (int i : g.Match(H2(hash))) {
@ -1311,6 +1381,7 @@ class raw_hash_set {
} }
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next(); seq.next();
assert(seq.index() < capacity_ && "full table!");
} }
} }
template <class K = key_type> template <class K = key_type>
@ -1521,7 +1592,7 @@ class raw_hash_set {
if (IsFull(old_ctrl[i])) { if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i)); PolicyTraits::element(old_slots + i));
auto target = find_first_non_full(hash); auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset; size_t new_i = target.offset;
total_probe_length += target.probe_length; total_probe_length += target.probe_length;
set_ctrl(new_i, H2(hash)); set_ctrl(new_i, H2(hash));
@ -1540,7 +1611,7 @@ class raw_hash_set {
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_)); assert(IsValidCapacity(capacity_));
assert(!is_small()); assert(!is_small(capacity_));
// Algorithm: // Algorithm:
// - mark all DELETED slots as EMPTY // - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED // - mark all FULL slots as DELETED
@ -1565,7 +1636,7 @@ class raw_hash_set {
if (!IsDeleted(ctrl_[i])) continue; if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i)); PolicyTraits::element(slots_ + i));
auto target = find_first_non_full(hash); auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset; size_t new_i = target.offset;
total_probe_length += target.probe_length; total_probe_length += target.probe_length;
@ -1573,7 +1644,8 @@ class raw_hash_set {
// If they do, we don't need to move the object as it falls already in the // If they do, we don't need to move the object as it falls already in the
// best probe we can. // best probe we can.
const auto probe_index = [&](size_t pos) { const auto probe_index = [&](size_t pos) {
return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth; return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
Group::kWidth;
}; };
// Element doesn't move. // Element doesn't move.
@ -1617,7 +1689,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const { bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
auto seq = probe(hash); auto seq = probe(ctrl_, hash, capacity_);
while (true) { while (true) {
Group g{ctrl_ + seq.offset()}; Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) { for (int i : g.Match(H2(hash))) {
@ -1632,41 +1704,6 @@ class raw_hash_set {
return false; return false;
} }
// Probes the raw_hash_set with the probe sequence for hash and returns the
// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both kEmpty and kDelete
// in one group. Such tables appears during drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
struct FindInfo {
size_t offset;
size_t probe_length;
};
FindInfo find_first_non_full(size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
assert(seq.index() < capacity_ && "full table!");
seq.next();
}
}
// TODO(alkis): Optimize this assuming *this and that don't overlap. // TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that)); raw_hash_set tmp(std::move(that));
@ -1683,7 +1720,7 @@ class raw_hash_set {
template <class K> template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) { std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
auto hash = hash_ref()(key); auto hash = hash_ref()(key);
auto seq = probe(hash); auto seq = probe(ctrl_, hash, capacity_);
while (true) { while (true) {
Group g{ctrl_ + seq.offset()}; Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) { for (int i : g.Match(H2(hash))) {
@ -1694,16 +1731,17 @@ class raw_hash_set {
} }
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break; if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next(); seq.next();
assert(seq.index() < capacity_ && "full table!");
} }
return {prepare_insert(hash), true}; return {prepare_insert(hash), true};
} }
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(hash); auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 && if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(ctrl_[target.offset]))) { !IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary(); rehash_and_grow_if_necessary();
target = find_first_non_full(hash); target = find_first_non_full(ctrl_, hash, capacity_);
} }
++size_; ++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]); growth_left() -= IsEmpty(ctrl_[target.offset]);
@ -1736,10 +1774,6 @@ class raw_hash_set {
private: private:
friend struct RawHashSetTestOnlyAccess; friend struct RawHashSetTestOnlyAccess;
probe_seq<Group::kWidth> probe(size_t hash) const {
return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
}
// Reset all ctrl bytes back to kEmpty, except the sentinel. // Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl() { void reset_ctrl() {
std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth); std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
@ -1769,22 +1803,6 @@ class raw_hash_set {
size_t& growth_left() { return settings_.template get<0>(); } size_t& growth_left() { return settings_.template get<0>(); }
// The representation of the object has two modes:
// - small: For capacities < kWidth-1
// - large: For the rest.
//
// Differences:
// - In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy kEmpty values that do not
// represent a real slot. This is important to take into account on
// find_first_non_full(), where we never try ShouldInsertBackwards() for
// small tables.
bool is_small() const { return capacity_ < Group::kWidth - 1; }
hasher& hash_ref() { return settings_.template get<1>(); } hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); } const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); } key_equal& eq_ref() { return settings_.template get<2>(); }
@ -1828,7 +1846,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) { const typename Set::key_type& key) {
size_t num_probes = 0; size_t num_probes = 0;
size_t hash = set.hash_ref()(key); size_t hash = set.hash_ref()(key);
auto seq = set.probe(hash); auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) { while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()}; container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) { for (int i : g.Match(container_internal::H2(hash))) {

View file

@ -424,6 +424,81 @@ TEST_F(PropagateOnAll, Swap) {
EXPECT_EQ(0, it->num_copies()); EXPECT_EQ(0, it->num_copies());
} }
// This allocator is similar to std::pmr::polymorphic_allocator.
// Note the disabled assignment.
template <class T>
class PAlloc {
template <class>
friend class PAlloc;
public:
// types
using value_type = T;
// traits
using propagate_on_container_swap = std::false_type;
PAlloc() noexcept = default;
explicit PAlloc(size_t id) noexcept : id_(id) {}
PAlloc(const PAlloc&) noexcept = default;
PAlloc& operator=(const PAlloc&) noexcept = delete;
template <class U>
PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {} // NOLINT
template <class U>
struct rebind {
using other = PAlloc<U>;
};
constexpr PAlloc select_on_container_copy_construction() const { return {}; }
// public member functions
T* allocate(size_t) { return new T; }
void deallocate(T* p, size_t) { delete p; }
friend bool operator==(const PAlloc& a, const PAlloc& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
private:
size_t id_ = std::numeric_limits<size_t>::max();
};
// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
__GNUC_MINOR__ != 5)
TEST(NoPropagateOn, Swap) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
swap(t1, t2);
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA(2));
}
#endif
TEST(NoPropagateOn, CopyConstruct) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(t1);
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA());
}
TEST(NoPropagateOn, Assignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
t1 = t2;
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA(2));
}
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
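Editor's note: the PAlloc tests above check that raw_hash_set honours allocator propagation traits. The same traits drive standard containers; a minimal sketch with a hypothetical `IdAlloc` and `std::vector` (not part of Abseil, assuming only the standard allocator requirements):

```cpp
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <vector>

// Hypothetical allocator for illustration: carries an id and opts out of
// propagation on copy assignment and swap, mirroring what the PAlloc tests
// exercise against raw_hash_set.
template <class T>
struct IdAlloc {
  using value_type = T;
  using propagate_on_container_copy_assignment = std::false_type;
  using propagate_on_container_swap = std::false_type;

  IdAlloc() = default;
  explicit IdAlloc(size_t id) : id(id) {}
  template <class U>
  IdAlloc(const IdAlloc<U>& other) : id(other.id) {}  // NOLINT

  T* allocate(size_t n) { return std::allocator<T>().allocate(n); }
  void deallocate(T* p, size_t n) { std::allocator<T>().deallocate(p, n); }

  friend bool operator==(const IdAlloc& a, const IdAlloc& b) { return a.id == b.id; }
  friend bool operator!=(const IdAlloc& a, const IdAlloc& b) { return !(a == b); }

  size_t id = std::numeric_limits<size_t>::max();
};

int main() {
  std::vector<int, IdAlloc<int>> v1(IdAlloc<int>(1));
  std::vector<int, IdAlloc<int>> v2(IdAlloc<int>(2));
  v1 = v2;  // propagate_on_container_copy_assignment is false,
  assert(v1.get_allocator() == IdAlloc<int>(1));  // so v1 keeps allocator #1.
  assert(v2.get_allocator() == IdAlloc<int>(2));
  return 0;
}
```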


@@ -26,6 +26,7 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/base/attributes.h" #include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h" #include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h" #include "absl/container/internal/container_memory.h"
@@ -846,7 +847,8 @@ TEST(Table, EraseMaintainsValidIterator) {
std::vector<int64_t> CollectBadMergeKeys(size_t N) { std::vector<int64_t> CollectBadMergeKeys(size_t N) {
static constexpr int kGroupSize = Group::kWidth - 1; static constexpr int kGroupSize = Group::kWidth - 1;
auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> { auto topk_range = [](size_t b, size_t e,
IntTable* t) -> std::vector<int64_t> {
for (size_t i = b; i != e; ++i) { for (size_t i = b; i != e; ++i) {
t->emplace(i); t->emplace(i);
} }
@@ -1000,8 +1002,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
// 1. Create new table and reserve it to keys.size() * 2 // 1. Create new table and reserve it to keys.size() * 2
// 2. Insert all keys xored with seed // 2. Insert all keys xored with seed
// 3. Collect ProbeStats from final table. // 3. Collect ProbeStats from final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys, ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
size_t num_iters) { const std::vector<int64_t>& keys, size_t num_iters) {
const size_t reserve_size = keys.size() * 2; const size_t reserve_size = keys.size() * 2;
ProbeStats stats; ProbeStats stats;
@@ -1709,6 +1711,26 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node); EXPECT_FALSE(node);
} }
TEST(Nodes, HintInsert) {
IntTable t = {1, 2, 3};
auto node = t.extract(1);
EXPECT_THAT(t, UnorderedElementsAre(2, 3));
auto it = t.insert(t.begin(), std::move(node));
EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
EXPECT_EQ(*it, 1);
EXPECT_FALSE(node);
node = t.extract(2);
EXPECT_THAT(t, UnorderedElementsAre(1, 3));
// reinsert 2 to make the next insert fail.
t.insert(2);
EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
it = t.insert(t.begin(), std::move(node));
EXPECT_EQ(*it, 2);
// The node was not emptied by the insert call.
EXPECT_TRUE(node);
}
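Editor's note: the HintInsert test above relies on the C++17 node-handle contract, where a hinted insert that finds an equivalent key leaves the handle untouched. The same behaviour, sketched with a standard container (requires C++17):

```cpp
#include <cassert>
#include <unordered_set>

int main() {
  std::unordered_set<int> s = {1, 2, 3};
  auto node = s.extract(2);                        // s = {1, 3}, node owns 2
  s.insert(2);                                     // occupy the key again
  auto it = s.insert(s.begin(), std::move(node));  // insertion fails
  assert(*it == 2);                                // iterator to the existing 2
  assert(!node.empty());                           // node still owns its value
  return 0;
}
```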
IntTable MakeSimpleTable(size_t size) { IntTable MakeSimpleTable(size_t size) {
IntTable t; IntTable t;
while (t.size() < size) t.insert(t.size()); while (t.size() < size) t.insert(t.size());
@@ -1791,11 +1813,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t; IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms. // Extra simple "regexp" as regexp support is highly varied across platforms.
constexpr char kDeathMsg[] = "IsFull"; constexpr char kDeathMsg[] = "Invalid operation on iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg); EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
} }
#if defined(ABSL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) { TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off. // Enable the feature even if the prod default is off.
SetHashtablezEnabled(true); SetHashtablezEnabled(true);
@@ -1816,7 +1838,7 @@ TEST(RawHashSamplerTest, Sample) {
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()), EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005); 0.01, 0.005);
} }
#endif // ABSL_HASHTABLEZ_SAMPLER #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) { TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off. // Enable the feature even if the prod default is off.
@@ -1839,7 +1861,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
0.00, 0.001); 0.00, 0.001);
} }
#ifdef ADDRESS_SANITIZER #ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) { TEST(Sanitizer, PoisoningUnused) {
IntTable t; IntTable t;
t.reserve(5); t.reserve(5);
@@ -1863,7 +1885,7 @@ TEST(Sanitizer, PoisoningOnErase) {
t.erase(0); t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v)); EXPECT_TRUE(__asan_address_is_poisoned(&v));
} }
#endif // ADDRESS_SANITIZER #endif // ABSL_HAVE_ADDRESS_SANITIZER
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal


@@ -225,7 +225,8 @@ class node_hash_map
// //
// size_type erase(const key_type& key): // size_type erase(const key_type& key):
// //
// Erases the element with the matching key, if it exists. // Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// node_hash_map::insert() // node_hash_map::insert()
@@ -374,6 +375,11 @@ class node_hash_map
// key value and returns a node handle owning that extracted data. If the // key value and returns a node handle owning that extracted data. If the
// `node_hash_map` does not contain an element with a matching key, this // `node_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle. // function returns an empty node handle.
//
// NOTE: when compiled with a C++ standard earlier than C++17,
// `node_type::key()` returns a const reference to the key instead of a
// mutable reference. We cannot safely return a mutable reference without
// std::launder (which is not available before C++17).
using Base::extract; using Base::extract;
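Editor's note: a short usage sketch of the erase/extract semantics documented above (the keys are arbitrary examples, not taken from the library):

```cpp
#include <cassert>
#include <string>

#include "absl/container/node_hash_map.h"

int main() {
  absl::node_hash_map<std::string, int> m = {{"a", 1}, {"b", 2}};
  assert(m.erase("a") == 1);   // one element erased
  assert(m.erase("zz") == 0);  // key absent, nothing erased
  auto nh = m.extract("b");    // node handle now owns {"b", 2}
  assert(!nh.empty() && m.empty());
  // Pre-C++17, nh.key() is a const reference; with C++17 the key may be
  // mutated before re-inserting the node.
  m.insert(std::move(nh));
  return 0;
}
```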
// node_hash_map::merge() // node_hash_map::merge()


@@ -254,6 +254,21 @@ TEST(NodeHashMap, EraseIf) {
} }
} }
// This test requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
node_hash_map<std::string, std::string> map;
map["key1"] = "mapped";
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
}
#endif
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END


@@ -217,7 +217,8 @@ class node_hash_set
// //
// size_type erase(const key_type& key): // size_type erase(const key_type& key):
// //
// Erases the element with the matching key, if it exists. // Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase; using Base::erase;
// node_hash_set::insert() // node_hash_set::insert()


@@ -5,47 +5,6 @@
list(APPEND ABSL_CLANG_CL_FLAGS list(APPEND ABSL_CLANG_CL_FLAGS
"/W3" "/W3"
"-Wno-c++98-compat-pedantic"
"-Wno-conversion"
"-Wno-covered-switch-default"
"-Wno-deprecated"
"-Wno-disabled-macro-expansion"
"-Wno-double-promotion"
"-Wno-comma"
"-Wno-extra-semi"
"-Wno-extra-semi-stmt"
"-Wno-packed"
"-Wno-padded"
"-Wno-sign-compare"
"-Wno-float-conversion"
"-Wno-float-equal"
"-Wno-format-nonliteral"
"-Wno-gcc-compat"
"-Wno-global-constructors"
"-Wno-exit-time-destructors"
"-Wno-non-modular-include-in-module"
"-Wno-old-style-cast"
"-Wno-range-loop-analysis"
"-Wno-reserved-id-macro"
"-Wno-shorten-64-to-32"
"-Wno-switch-enum"
"-Wno-thread-safety-negative"
"-Wno-unknown-warning-option"
"-Wno-unreachable-code"
"-Wno-unused-macros"
"-Wno-weak-vtables"
"-Wno-zero-as-null-pointer-constant"
"-Wbitfield-enum-conversion"
"-Wbool-conversion"
"-Wconstant-conversion"
"-Wenum-conversion"
"-Wint-conversion"
"-Wliteral-conversion"
"-Wnon-literal-null-conversion"
"-Wnull-conversion"
"-Wobjc-literal-conversion"
"-Wno-sign-conversion"
"-Wstring-conversion"
"/DNOMINMAX" "/DNOMINMAX"
"/DWIN32_LEAN_AND_MEAN" "/DWIN32_LEAN_AND_MEAN"
"/D_CRT_SECURE_NO_WARNINGS" "/D_CRT_SECURE_NO_WARNINGS"
@@ -78,16 +37,17 @@ list(APPEND ABSL_GCC_FLAGS
"-Wextra" "-Wextra"
"-Wcast-qual" "-Wcast-qual"
"-Wconversion-null" "-Wconversion-null"
"-Wformat-security"
"-Wmissing-declarations" "-Wmissing-declarations"
"-Woverlength-strings" "-Woverlength-strings"
"-Wpointer-arith" "-Wpointer-arith"
"-Wundef"
"-Wunused-local-typedefs" "-Wunused-local-typedefs"
"-Wunused-result" "-Wunused-result"
"-Wvarargs" "-Wvarargs"
"-Wvla" "-Wvla"
"-Wwrite-strings" "-Wwrite-strings"
"-Wno-missing-field-initializers" "-DNOMINMAX"
"-Wno-sign-compare"
) )
list(APPEND ABSL_GCC_TEST_FLAGS list(APPEND ABSL_GCC_TEST_FLAGS
@@ -103,48 +63,37 @@ list(APPEND ABSL_GCC_TEST_FLAGS
list(APPEND ABSL_LLVM_FLAGS list(APPEND ABSL_LLVM_FLAGS
"-Wall" "-Wall"
"-Wextra" "-Wextra"
"-Weverything" "-Wcast-qual"
"-Wno-c++98-compat-pedantic" "-Wconversion"
"-Wno-conversion" "-Wfloat-overflow-conversion"
"-Wno-covered-switch-default" "-Wfloat-zero-conversion"
"-Wno-deprecated" "-Wfor-loop-analysis"
"-Wno-disabled-macro-expansion" "-Wformat-security"
"-Wno-double-promotion" "-Wgnu-redeclared-enum"
"-Wno-comma" "-Winfinite-recursion"
"-Wno-extra-semi"
"-Wno-extra-semi-stmt"
"-Wno-packed"
"-Wno-padded"
"-Wno-sign-compare"
"-Wno-float-conversion"
"-Wno-float-equal"
"-Wno-format-nonliteral"
"-Wno-gcc-compat"
"-Wno-global-constructors"
"-Wno-exit-time-destructors"
"-Wno-non-modular-include-in-module"
"-Wno-old-style-cast"
"-Wno-range-loop-analysis"
"-Wno-reserved-id-macro"
"-Wno-shorten-64-to-32"
"-Wno-switch-enum"
"-Wno-thread-safety-negative"
"-Wno-unknown-warning-option"
"-Wno-unreachable-code"
"-Wno-unused-macros"
"-Wno-weak-vtables"
"-Wno-zero-as-null-pointer-constant"
"-Wbitfield-enum-conversion"
"-Wbool-conversion"
"-Wconstant-conversion"
"-Wenum-conversion"
"-Wint-conversion"
"-Wliteral-conversion" "-Wliteral-conversion"
"-Wnon-literal-null-conversion" "-Wmissing-declarations"
"-Wnull-conversion" "-Woverlength-strings"
"-Wobjc-literal-conversion" "-Wpointer-arith"
"-Wno-sign-conversion" "-Wself-assign"
"-Wshadow"
"-Wstring-conversion" "-Wstring-conversion"
"-Wtautological-overlap-compare"
"-Wundef"
"-Wuninitialized"
"-Wunreachable-code"
"-Wunused-comparison"
"-Wunused-local-typedefs"
"-Wunused-result"
"-Wvla"
"-Wwrite-strings"
"-Wno-float-conversion"
"-Wno-implicit-float-conversion"
"-Wno-implicit-int-float-conversion"
"-Wno-implicit-int-conversion"
"-Wno-shorten-64-to-32"
"-Wno-sign-conversion"
"-DNOMINMAX"
) )
list(APPEND ABSL_LLVM_TEST_FLAGS list(APPEND ABSL_LLVM_TEST_FLAGS


@@ -6,47 +6,6 @@
ABSL_CLANG_CL_FLAGS = [ ABSL_CLANG_CL_FLAGS = [
"/W3", "/W3",
"-Wno-c++98-compat-pedantic",
"-Wno-conversion",
"-Wno-covered-switch-default",
"-Wno-deprecated",
"-Wno-disabled-macro-expansion",
"-Wno-double-promotion",
"-Wno-comma",
"-Wno-extra-semi",
"-Wno-extra-semi-stmt",
"-Wno-packed",
"-Wno-padded",
"-Wno-sign-compare",
"-Wno-float-conversion",
"-Wno-float-equal",
"-Wno-format-nonliteral",
"-Wno-gcc-compat",
"-Wno-global-constructors",
"-Wno-exit-time-destructors",
"-Wno-non-modular-include-in-module",
"-Wno-old-style-cast",
"-Wno-range-loop-analysis",
"-Wno-reserved-id-macro",
"-Wno-shorten-64-to-32",
"-Wno-switch-enum",
"-Wno-thread-safety-negative",
"-Wno-unknown-warning-option",
"-Wno-unreachable-code",
"-Wno-unused-macros",
"-Wno-weak-vtables",
"-Wno-zero-as-null-pointer-constant",
"-Wbitfield-enum-conversion",
"-Wbool-conversion",
"-Wconstant-conversion",
"-Wenum-conversion",
"-Wint-conversion",
"-Wliteral-conversion",
"-Wnon-literal-null-conversion",
"-Wnull-conversion",
"-Wobjc-literal-conversion",
"-Wno-sign-conversion",
"-Wstring-conversion",
"/DNOMINMAX", "/DNOMINMAX",
"/DWIN32_LEAN_AND_MEAN", "/DWIN32_LEAN_AND_MEAN",
"/D_CRT_SECURE_NO_WARNINGS", "/D_CRT_SECURE_NO_WARNINGS",
@@ -79,16 +38,17 @@ ABSL_GCC_FLAGS = [
"-Wextra", "-Wextra",
"-Wcast-qual", "-Wcast-qual",
"-Wconversion-null", "-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations", "-Wmissing-declarations",
"-Woverlength-strings", "-Woverlength-strings",
"-Wpointer-arith", "-Wpointer-arith",
"-Wundef",
"-Wunused-local-typedefs", "-Wunused-local-typedefs",
"-Wunused-result", "-Wunused-result",
"-Wvarargs", "-Wvarargs",
"-Wvla", "-Wvla",
"-Wwrite-strings", "-Wwrite-strings",
"-Wno-missing-field-initializers", "-DNOMINMAX",
"-Wno-sign-compare",
] ]
ABSL_GCC_TEST_FLAGS = [ ABSL_GCC_TEST_FLAGS = [
@@ -104,48 +64,37 @@ ABSL_GCC_TEST_FLAGS = [
ABSL_LLVM_FLAGS = [ ABSL_LLVM_FLAGS = [
"-Wall", "-Wall",
"-Wextra", "-Wextra",
"-Weverything", "-Wcast-qual",
"-Wno-c++98-compat-pedantic", "-Wconversion",
"-Wno-conversion", "-Wfloat-overflow-conversion",
"-Wno-covered-switch-default", "-Wfloat-zero-conversion",
"-Wno-deprecated", "-Wfor-loop-analysis",
"-Wno-disabled-macro-expansion", "-Wformat-security",
"-Wno-double-promotion", "-Wgnu-redeclared-enum",
"-Wno-comma", "-Winfinite-recursion",
"-Wno-extra-semi",
"-Wno-extra-semi-stmt",
"-Wno-packed",
"-Wno-padded",
"-Wno-sign-compare",
"-Wno-float-conversion",
"-Wno-float-equal",
"-Wno-format-nonliteral",
"-Wno-gcc-compat",
"-Wno-global-constructors",
"-Wno-exit-time-destructors",
"-Wno-non-modular-include-in-module",
"-Wno-old-style-cast",
"-Wno-range-loop-analysis",
"-Wno-reserved-id-macro",
"-Wno-shorten-64-to-32",
"-Wno-switch-enum",
"-Wno-thread-safety-negative",
"-Wno-unknown-warning-option",
"-Wno-unreachable-code",
"-Wno-unused-macros",
"-Wno-weak-vtables",
"-Wno-zero-as-null-pointer-constant",
"-Wbitfield-enum-conversion",
"-Wbool-conversion",
"-Wconstant-conversion",
"-Wenum-conversion",
"-Wint-conversion",
"-Wliteral-conversion", "-Wliteral-conversion",
"-Wnon-literal-null-conversion", "-Wmissing-declarations",
"-Wnull-conversion", "-Woverlength-strings",
"-Wobjc-literal-conversion", "-Wpointer-arith",
"-Wno-sign-conversion", "-Wself-assign",
"-Wshadow",
"-Wstring-conversion", "-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
"-Wno-float-conversion",
"-Wno-implicit-float-conversion",
"-Wno-implicit-int-float-conversion",
"-Wno-implicit-int-conversion",
"-Wno-shorten-64-to-32",
"-Wno-sign-conversion",
"-DNOMINMAX",
] ]
ABSL_LLVM_TEST_FLAGS = [ ABSL_LLVM_TEST_FLAGS = [


@@ -23,15 +23,13 @@ load(
ABSL_DEFAULT_COPTS = select({ ABSL_DEFAULT_COPTS = select({
"//absl:windows": ABSL_MSVC_FLAGS, "//absl:windows": ABSL_MSVC_FLAGS,
"//absl:llvm_compiler": ABSL_LLVM_FLAGS, "//absl:clang_compiler": ABSL_LLVM_FLAGS,
"//conditions:default": ABSL_GCC_FLAGS, "//conditions:default": ABSL_GCC_FLAGS,
}) })
# in absence of modules (--compiler=gcc or -c opt), cc_tests leak their copts
# to their (included header) dependencies and fail to build outside absl
ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({ ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({
"//absl:windows": ABSL_MSVC_TEST_FLAGS, "//absl:windows": ABSL_MSVC_TEST_FLAGS,
"//absl:llvm_compiler": ABSL_LLVM_TEST_FLAGS, "//absl:clang_compiler": ABSL_LLVM_TEST_FLAGS,
"//conditions:default": ABSL_GCC_TEST_FLAGS, "//conditions:default": ABSL_GCC_TEST_FLAGS,
}) })


@@ -16,77 +16,6 @@ MSVC_BIG_WARNING_FLAGS = [
"/W3", "/W3",
] ]
LLVM_BIG_WARNING_FLAGS = [
"-Wall",
"-Wextra",
"-Weverything",
]
# Docs on single flags is preceded by a comment.
# Docs on groups of flags is preceded by ###.
LLVM_DISABLE_WARNINGS_FLAGS = [
# Abseil does not support C++98
"-Wno-c++98-compat-pedantic",
# Turns off all implicit conversion warnings. Most are re-enabled below.
"-Wno-conversion",
"-Wno-covered-switch-default",
"-Wno-deprecated",
"-Wno-disabled-macro-expansion",
"-Wno-double-promotion",
###
# Turned off as they include valid C++ code.
"-Wno-comma",
"-Wno-extra-semi",
"-Wno-extra-semi-stmt",
"-Wno-packed",
"-Wno-padded",
###
# Google style does not use unsigned integers, though STL containers
# have unsigned types.
"-Wno-sign-compare",
###
"-Wno-float-conversion",
"-Wno-float-equal",
"-Wno-format-nonliteral",
# Too aggressive: warns on Clang extensions enclosed in Clang-only
# compilation paths.
"-Wno-gcc-compat",
###
# Some internal globals are necessary. Don't do this at home.
"-Wno-global-constructors",
"-Wno-exit-time-destructors",
###
"-Wno-non-modular-include-in-module",
"-Wno-old-style-cast",
# Warns on preferred usage of non-POD types such as string_view
"-Wno-range-loop-analysis",
"-Wno-reserved-id-macro",
"-Wno-shorten-64-to-32",
"-Wno-switch-enum",
"-Wno-thread-safety-negative",
"-Wno-unknown-warning-option",
"-Wno-unreachable-code",
# Causes warnings on include guards
"-Wno-unused-macros",
"-Wno-weak-vtables",
# Causes warnings on usage of types/compare.h comparison operators.
"-Wno-zero-as-null-pointer-constant",
###
# Implicit conversion warnings turned off by -Wno-conversion
# which are re-enabled below.
"-Wbitfield-enum-conversion",
"-Wbool-conversion",
"-Wconstant-conversion",
"-Wenum-conversion",
"-Wint-conversion",
"-Wliteral-conversion",
"-Wnon-literal-null-conversion",
"-Wnull-conversion",
"-Wobjc-literal-conversion",
"-Wno-sign-conversion",
"-Wstring-conversion",
]
LLVM_TEST_DISABLE_WARNINGS_FLAGS = [ LLVM_TEST_DISABLE_WARNINGS_FLAGS = [
"-Wno-c99-extensions", "-Wno-c99-extensions",
"-Wno-deprecated-declarations", "-Wno-deprecated-declarations",
@@ -125,21 +54,18 @@ COPT_VARS = {
"-Wextra", "-Wextra",
"-Wcast-qual", "-Wcast-qual",
"-Wconversion-null", "-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations", "-Wmissing-declarations",
"-Woverlength-strings", "-Woverlength-strings",
"-Wpointer-arith", "-Wpointer-arith",
"-Wundef",
"-Wunused-local-typedefs", "-Wunused-local-typedefs",
"-Wunused-result", "-Wunused-result",
"-Wvarargs", "-Wvarargs",
"-Wvla", # variable-length array "-Wvla", # variable-length array
"-Wwrite-strings", "-Wwrite-strings",
# gcc-4.x has spurious missing field initializer warnings. # Don't define min and max macros (Build on Windows using gcc)
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750 "-DNOMINMAX",
# Remove when gcc-4.x is no longer supported.
"-Wno-missing-field-initializers",
# Google style does not use unsigned integers, though STL containers
# have unsigned types.
"-Wno-sign-compare",
], ],
"ABSL_GCC_TEST_FLAGS": [ "ABSL_GCC_TEST_FLAGS": [
"-Wno-conversion-null", "-Wno-conversion-null",
@@ -150,12 +76,48 @@ COPT_VARS = {
"-Wno-unused-parameter", "-Wno-unused-parameter",
"-Wno-unused-private-field", "-Wno-unused-private-field",
], ],
"ABSL_LLVM_FLAGS": "ABSL_LLVM_FLAGS": [
LLVM_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS, "-Wall",
"-Wextra",
"-Wcast-qual",
"-Wconversion",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",
"-Wformat-security",
"-Wgnu-redeclared-enum",
"-Winfinite-recursion",
"-Wliteral-conversion",
"-Wmissing-declarations",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wself-assign",
"-Wshadow",
"-Wstring-conversion",
"-Wtautological-overlap-compare",
"-Wundef",
"-Wuninitialized",
"-Wunreachable-code",
"-Wunused-comparison",
"-Wunused-local-typedefs",
"-Wunused-result",
"-Wvla",
"-Wwrite-strings",
# Warnings that are enabled by group warning flags like -Wall that we
# explicitly disable.
"-Wno-float-conversion",
"-Wno-implicit-float-conversion",
"-Wno-implicit-int-float-conversion",
"-Wno-implicit-int-conversion",
"-Wno-shorten-64-to-32",
"-Wno-sign-conversion",
# Don't define min and max macros (Build on Windows using clang)
"-DNOMINMAX",
],
"ABSL_LLVM_TEST_FLAGS": "ABSL_LLVM_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS, LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_CLANG_CL_FLAGS": "ABSL_CLANG_CL_FLAGS":
(MSVC_BIG_WARNING_FLAGS + LLVM_DISABLE_WARNINGS_FLAGS + MSVC_DEFINES), (MSVC_BIG_WARNING_FLAGS + MSVC_DEFINES),
"ABSL_CLANG_CL_TEST_FLAGS": "ABSL_CLANG_CL_TEST_FLAGS":
LLVM_TEST_DISABLE_WARNINGS_FLAGS, LLVM_TEST_DISABLE_WARNINGS_FLAGS,
"ABSL_MSVC_FLAGS": "ABSL_MSVC_FLAGS":


@@ -26,7 +26,7 @@ package(
default_visibility = ["//visibility:public"], default_visibility = ["//visibility:public"],
) )
licenses(["notice"]) # Apache 2.0 licenses(["notice"])
cc_library( cc_library(
name = "stacktrace", name = "stacktrace",
@@ -97,6 +97,7 @@ cc_test(
":stack_consumption", ":stack_consumption",
":symbolize", ":symbolize",
"//absl/base", "//absl/base",
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:raw_logging_internal", "//absl/base:raw_logging_internal",
"//absl/memory", "//absl/memory",
@@ -148,6 +149,7 @@ cc_test(
copts = ABSL_TEST_COPTS, copts = ABSL_TEST_COPTS,
linkopts = select({ linkopts = select({
"//absl:windows": [], "//absl:windows": [],
"//absl:wasm": [],
"//conditions:default": ["-pthread"], "//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS, }) + ABSL_DEFAULT_LINKOPTS,
deps = [ deps = [
@@ -203,6 +205,7 @@ cc_test(
deps = [ deps = [
":demangle_internal", ":demangle_internal",
":stack_consumption", ":stack_consumption",
"//absl/base:config",
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:raw_logging_internal", "//absl/base:raw_logging_internal",
"//absl/memory", "//absl/memory",
@@ -236,7 +239,7 @@ cc_library(
# These targets exists for use in tests only, explicitly configuring the # These targets exists for use in tests only, explicitly configuring the
# LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan. # LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan.
ABSL_LSAN_LINKOPTS = select({ ABSL_LSAN_LINKOPTS = select({
"//absl:llvm_compiler": ["-fsanitize=leak"], "//absl:clang_compiler": ["-fsanitize=leak"],
"//conditions:default": [], "//conditions:default": [],
}) })
@@ -246,7 +249,7 @@ cc_library(
srcs = ["leak_check.cc"], srcs = ["leak_check.cc"],
hdrs = ["leak_check.h"], hdrs = ["leak_check.h"],
copts = select({ copts = select({
"//absl:llvm_compiler": ["-DLEAK_SANITIZER"], "//absl:clang_compiler": ["-DLEAK_SANITIZER"],
"//conditions:default": [], "//conditions:default": [],
}), }),
linkopts = ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -273,7 +276,7 @@ cc_test(
name = "leak_check_test", name = "leak_check_test",
srcs = ["leak_check_test.cc"], srcs = ["leak_check_test.cc"],
copts = select({ copts = select({
"//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"], "//absl:clang_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
"//conditions:default": [], "//conditions:default": [],
}), }),
linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS, linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,


@@ -82,6 +82,7 @@ absl_cc_test(
absl::stack_consumption absl::stack_consumption
absl::symbolize absl::symbolize
absl::base absl::base
absl::config
absl::core_headers absl::core_headers
absl::memory absl::memory
absl::raw_logging_internal absl::raw_logging_internal
@@ -189,6 +190,7 @@ absl_cc_test(
DEPS DEPS
absl::demangle_internal absl::demangle_internal
absl::stack_consumption absl::stack_consumption
absl::config
absl::core_headers absl::core_headers
absl::memory absl::memory
absl::raw_logging_internal absl::raw_logging_internal


@@ -136,8 +136,8 @@ static bool SetupAlternateStackOnce() {
const size_t page_mask = sysconf(_SC_PAGESIZE) - 1; const size_t page_mask = sysconf(_SC_PAGESIZE) - 1;
#endif #endif
size_t stack_size = (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask; size_t stack_size = (std::max(SIGSTKSZ, 65536) + page_mask) & ~page_mask;
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ #if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(THREAD_SANITIZER) defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
// Account for sanitizer instrumentation requiring additional stack space. // Account for sanitizer instrumentation requiring additional stack space.
stack_size *= 5; stack_size *= 5;
#endif #endif
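Editor's note: the stack-size computation above uses the usual power-of-two round-up idiom with `page_mask = page_size - 1`. The same idiom in isolation (illustrative only):

```cpp
#include <cstddef>

// Rounds n up to a multiple of page_size; page_size must be a power of two.
constexpr size_t RoundUpToPage(size_t n, size_t page_size) {
  return (n + page_size - 1) & ~(page_size - 1);
}
static_assert(RoundUpToPage(65537, 4096) == 69632, "rounds up to the next page");
```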


@@ -68,6 +68,7 @@ static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
// unimplemented. // unimplemented.
// This is a namespace-scoped variable for correct zero-initialization. // This is a namespace-scoped variable for correct zero-initialization.
static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid. static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
bool AddressIsReadable(const void *addr) { bool AddressIsReadable(const void *addr) {
absl::base_internal::ErrnoSaver errno_saver; absl::base_internal::ErrnoSaver errno_saver;
// We test whether a byte is readable by using write(). Normally, this would // We test whether a byte is readable by using write(). Normally, this would
@@ -86,7 +87,7 @@ bool AddressIsReadable(const void *addr) {
int pid; int pid;
int read_fd; int read_fd;
int write_fd; int write_fd;
uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed); uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
while (current_pid != pid) { while (current_pid != pid) {
int p[2]; int p[2];
@@ -98,13 +99,13 @@ bool AddressIsReadable(const void *addr) {
fcntl(p[1], F_SETFD, FD_CLOEXEC); fcntl(p[1], F_SETFD, FD_CLOEXEC);
uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]); uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
if (pid_and_fds.compare_exchange_strong( if (pid_and_fds.compare_exchange_strong(
local_pid_and_fds, new_pid_and_fds, std::memory_order_relaxed, local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
} else { // fds not exposed to other threads; we can close them. } else { // fds not exposed to other threads; we can close them.
close(p[0]); close(p[0]);
close(p[1]); close(p[1]);
local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed); local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
} }
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd); Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
} }
@@ -124,7 +125,7 @@ bool AddressIsReadable(const void *addr) {
// If pid_and_fds contains the problematic file descriptors we just used, // If pid_and_fds contains the problematic file descriptors we just used,
// this call will forget them, and the loop will try again. // this call will forget them, and the loop will try again.
pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0, pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
std::memory_order_relaxed, std::memory_order_release,
std::memory_order_relaxed); std::memory_order_relaxed);
} }
} while (errno == EBADF); } while (errno == EBADF);


@@ -126,6 +126,7 @@ static const AbbrevPair kBuiltinTypeList[] = {
{"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr) {"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr)
{"Df", "decimal32", 0}, // IEEE 754r decimal floating point (32 bits) {"Df", "decimal32", 0}, // IEEE 754r decimal floating point (32 bits)
{"Di", "char32_t", 0}, {"Di", "char32_t", 0},
{"Du", "char8_t", 0},
{"Ds", "char16_t", 0}, {"Ds", "char16_t", 0},
{"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits) {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
{nullptr, nullptr, 0}, {nullptr, nullptr, 0},
@@ -409,6 +410,7 @@ static bool IsFunctionCloneSuffix(const char *str) {
static bool EndsWith(State *state, const char chr) { static bool EndsWith(State *state, const char chr) {
return state->parse_state.out_cur_idx > 0 && return state->parse_state.out_cur_idx > 0 &&
state->parse_state.out_cur_idx < state->out_end_idx &&
chr == state->out[state->parse_state.out_cur_idx - 1]; chr == state->out[state->parse_state.out_cur_idx - 1];
} }
@@ -421,8 +423,10 @@ static void MaybeAppendWithLength(State *state, const char *const str,
if (str[0] == '<' && EndsWith(state, '<')) { if (str[0] == '<' && EndsWith(state, '<')) {
Append(state, " ", 1); Append(state, " ", 1);
} }
// Remember the last identifier name for ctors/dtors. // Remember the last identifier name for ctors/dtors,
if (IsAlpha(str[0]) || str[0] == '_') { // but only if we haven't yet overflown the buffer.
if (state->parse_state.out_cur_idx < state->out_end_idx &&
(IsAlpha(str[0]) || str[0] == '_')) {
state->parse_state.prev_name_idx = state->parse_state.out_cur_idx; state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
state->parse_state.prev_name_length = length; state->parse_state.prev_name_length = length;
} }
@@ -962,6 +966,7 @@ static bool ParseOperatorName(State *state, int *arity) {
// ::= TT <type> // ::= TT <type>
// ::= TI <type> // ::= TI <type>
// ::= TS <type> // ::= TS <type>
// ::= TH <type> # thread-local
// ::= Tc <call-offset> <call-offset> <(base) encoding> // ::= Tc <call-offset> <call-offset> <(base) encoding>
// ::= GV <(object) name> // ::= GV <(object) name>
// ::= T <call-offset> <(base) encoding> // ::= T <call-offset> <(base) encoding>
@@ -980,7 +985,7 @@ static bool ParseSpecialName(State *state) {
ComplexityGuard guard(state); ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false; if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state; ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") && if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
ParseType(state)) { ParseType(state)) {
return true; return true;
} }
@@ -1077,20 +1082,28 @@ static bool ParseVOffset(State *state) {
return false; return false;
} }
// <ctor-dtor-name> ::= C1 | C2 | C3 // <ctor-dtor-name> ::= C1 | C2 | C3 | CI1 <base-class-type> | CI2
// <base-class-type>
// ::= D0 | D1 | D2 // ::= D0 | D1 | D2
// # GCC extensions: "unified" constructor/destructor. See // # GCC extensions: "unified" constructor/destructor. See
// # https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847 // #
// https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847
// ::= C4 | D4 // ::= C4 | D4
static bool ParseCtorDtorName(State *state) { static bool ParseCtorDtorName(State *state) {
ComplexityGuard guard(state); ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false; if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state; ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'C') && ParseCharClass(state, "1234")) { if (ParseOneCharToken(state, 'C')) {
const char *const prev_name = state->out + state->parse_state.prev_name_idx; if (ParseCharClass(state, "1234")) {
MaybeAppendWithLength(state, prev_name, const char *const prev_name =
state->parse_state.prev_name_length); state->out + state->parse_state.prev_name_idx;
return true; MaybeAppendWithLength(state, prev_name,
state->parse_state.prev_name_length);
return true;
} else if (ParseOneCharToken(state, 'I') && ParseCharClass(state, "12") &&
ParseClassEnumType(state)) {
return true;
}
} }
state->parse_state = copy; state->parse_state = copy;
@@ -1139,6 +1152,7 @@ static bool ParseDecltype(State *state) {
// ::= <decltype> // ::= <decltype>
// ::= <substitution> // ::= <substitution>
// ::= Dp <type> # pack expansion of (C++0x) // ::= Dp <type> # pack expansion of (C++0x)
// ::= Dv <num-elems> _ # GNU vector extension
// //
static bool ParseType(State *state) { static bool ParseType(State *state) {
ComplexityGuard guard(state); ComplexityGuard guard(state);
@@ -1205,6 +1219,12 @@ static bool ParseType(State *state) {
return true; return true;
} }
if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
return true;
}
state->parse_state = copy;
return false; return false;
} }
@@ -1253,13 +1273,42 @@ static bool ParseBuiltinType(State *state) {
return false; return false;
} }
// <function-type> ::= F [Y] <bare-function-type> E // <exception-spec> ::= Do # non-throwing
// exception-specification (e.g.,
// noexcept, throw())
// ::= DO <expression> E # computed (instantiation-dependent)
// noexcept
// ::= Dw <type>+ E # dynamic exception specification
// with instantiation-dependent types
static bool ParseExceptionSpec(State *state) {
ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false;
if (ParseTwoCharToken(state, "Do")) return true;
ParseState copy = state->parse_state;
if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
if (ParseTwoCharToken(state, "Dw") && OneOrMore(ParseType, state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
state->parse_state = copy;
return false;
}
// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
static bool ParseFunctionType(State *state) { static bool ParseFunctionType(State *state) {
ComplexityGuard guard(state); ComplexityGuard guard(state);
if (guard.IsTooComplex()) return false; if (guard.IsTooComplex()) return false;
ParseState copy = state->parse_state; ParseState copy = state->parse_state;
if (ParseOneCharToken(state, 'F') && if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) && Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
Optional(ParseOneCharToken(state, 'O')) &&
ParseOneCharToken(state, 'E')) { ParseOneCharToken(state, 'E')) {
return true; return true;
} }
@@ -1887,7 +1936,8 @@ static bool Overflowed(const State *state) {
bool Demangle(const char *mangled, char *out, int out_size) { bool Demangle(const char *mangled, char *out, int out_size) {
State state; State state;
InitState(&state, mangled, out, out_size); InitState(&state, mangled, out, out_size);
return ParseTopLevelMangledName(&state) && !Overflowed(&state); return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
state.parse_state.out_cur_idx > 0;
} }
} // namespace debugging_internal } // namespace debugging_internal
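Editor's note: the Demangle() entry point patched above can be exercised directly. A small sketch; "_Z1fi" is the Itanium mangling of `f(int)`, chosen here purely as an example, and Abseil's demangler deliberately drops parameter types from its output:

```cpp
#include <cstdio>

#include "absl/debugging/internal/demangle.h"

int main() {
  char out[64];
  if (absl::debugging_internal::Demangle("_Z1fi", out,
                                          static_cast<int>(sizeof(out)))) {
    std::printf("%s\n", out);  // e.g. "f()" (parameter types omitted)
  }
  return 0;
}
```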

View file

@@ -18,6 +18,7 @@
#include <string> #include <string>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/debugging/internal/stack_consumption.h" #include "absl/debugging/internal/stack_consumption.h"
#include "absl/memory/memory.h" #include "absl/memory/memory.h"
@@ -82,9 +83,10 @@ TEST(Demangle, Clones) {
// Tests that verify that Demangle footprint is within some limit. // Tests that verify that Demangle footprint is within some limit.
// They are not to be run under sanitizers as the sanitizers increase // They are not to be run under sanitizers as the sanitizers increase
// stack consumption by about 4x. // stack consumption by about 4x.
#if defined(ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION) && \ #if defined(ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION) && \
!defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \ !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
!defined(THREAD_SANITIZER) !defined(ABSL_HAVE_MEMORY_SANITIZER) && \
!defined(ABSL_HAVE_THREAD_SANITIZER)
static const char *g_mangled; static const char *g_mangled;
static char g_demangle_buffer[4096]; static char g_demangle_buffer[4096];


@@ -42,7 +42,8 @@ namespace {
// one of them is null, the results of p<q, p>q, p<=q, and p>=q are // one of them is null, the results of p<q, p>q, p<=q, and p>=q are
// unspecified. Therefore, instead we hardcode the direction of the // unspecified. Therefore, instead we hardcode the direction of the
// stack on platforms we know about. // stack on platforms we know about.
#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) #if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
defined(__aarch64__)
constexpr bool kStackGrowsDown = true; constexpr bool kStackGrowsDown = true;
#else #else
#error Need to define kStackGrowsDown #error Need to define kStackGrowsDown


@@ -24,8 +24,9 @@
// Use this feature test macro to detect its availability. // Use this feature test macro to detect its availability.
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION #ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly #error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
#elif !defined(__APPLE__) && !defined(_WIN32) && \ #elif !defined(__APPLE__) && !defined(_WIN32) && \
(defined(__i386__) || defined(__x86_64__) || defined(__ppc__)) (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
defined(__aarch64__))
#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1 #define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
namespace absl { namespace absl {


@@ -37,8 +37,11 @@ static const unsigned char* GetKernelRtSigreturnAddress() {
absl::debugging_internal::VDSOSupport vdso; absl::debugging_internal::VDSOSupport vdso;
if (vdso.IsPresent()) { if (vdso.IsPresent()) {
absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC, auto lookup = [&](int type) {
&symbol_info) || return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
&symbol_info);
};
if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
symbol_info.address == nullptr) { symbol_info.address == nullptr) {
// Unexpected: VDSO is present, yet the expected symbol is missing // Unexpected: VDSO is present, yet the expected symbol is missing
// or null. // or null.

Some files were not shown because too many files have changed in this diff.