From c2e754829628d1e9b7a16b3389cfdace76950fdf Mon Sep 17 00:00:00 2001 From: misterg Date: Tue, 19 Sep 2017 16:54:40 -0400 Subject: [PATCH] Initial Commit --- ABSEIL_ISSUE_TEMPLATE.md | 22 + AUTHORS | 6 + CONTRIBUTING.md | 90 + LICENSE | 204 ++ README.md | 89 + WORKSPACE | 22 + absl/BUILD.bazel | 62 + absl/algorithm/BUILD.bazel | 69 + absl/algorithm/algorithm.h | 138 + absl/algorithm/algorithm_test.cc | 182 ++ absl/algorithm/container.h | 1652 ++++++++++ absl/algorithm/container_test.cc | 1010 +++++++ absl/base/BUILD.bazel | 369 +++ absl/base/attributes.h | 469 +++ absl/base/bit_cast_test.cc | 107 + absl/base/call_once.h | 210 ++ absl/base/call_once_test.cc | 102 + absl/base/casts.h | 141 + absl/base/config.h | 367 +++ absl/base/config_test.cc | 45 + absl/base/dynamic_annotations.cc | 129 + absl/base/dynamic_annotations.h | 409 +++ absl/base/internal/atomic_hook.h | 122 + absl/base/internal/cycleclock.cc | 81 + absl/base/internal/cycleclock.h | 77 + absl/base/internal/endian.h | 267 ++ absl/base/internal/endian_test.cc | 281 ++ absl/base/internal/exception_testing.h | 24 + absl/base/internal/identity.h | 33 + absl/base/internal/invoke.h | 188 ++ absl/base/internal/log_severity.cc | 15 + absl/base/internal/log_severity.h | 52 + absl/base/internal/low_level_alloc.cc | 598 ++++ absl/base/internal/low_level_alloc.h | 120 + absl/base/internal/low_level_alloc_test.cc | 203 ++ absl/base/internal/low_level_scheduling.h | 104 + absl/base/internal/malloc_extension.cc | 197 ++ absl/base/internal/malloc_extension.h | 424 +++ absl/base/internal/malloc_extension_c.h | 75 + absl/base/internal/malloc_extension_test.cc | 102 + absl/base/internal/malloc_hook.cc | 611 ++++ absl/base/internal/malloc_hook.h | 333 ++ absl/base/internal/malloc_hook_c.h | 131 + absl/base/internal/malloc_hook_invoke.h | 198 ++ absl/base/internal/malloc_hook_mmap_linux.inc | 236 ++ absl/base/internal/per_thread_tls.h | 48 + absl/base/internal/raw_logging.cc | 225 ++ absl/base/internal/raw_logging.h | 129 + absl/base/internal/scheduling_mode.h | 54 + absl/base/internal/spinlock.cc | 243 ++ absl/base/internal/spinlock.h | 227 ++ absl/base/internal/spinlock_posix.inc | 46 + absl/base/internal/spinlock_wait.cc | 77 + absl/base/internal/spinlock_wait.h | 94 + absl/base/internal/spinlock_win32.inc | 37 + absl/base/internal/sysinfo.cc | 370 +++ absl/base/internal/sysinfo.h | 64 + absl/base/internal/sysinfo_test.cc | 99 + absl/base/internal/thread_identity.cc | 126 + absl/base/internal/thread_identity.h | 240 ++ absl/base/internal/thread_identity_test.cc | 124 + absl/base/internal/throw_delegate.cc | 106 + absl/base/internal/throw_delegate.h | 71 + absl/base/internal/tsan_mutex_interface.h | 51 + absl/base/internal/unaligned_access.h | 256 ++ absl/base/internal/unscaledcycleclock.cc | 101 + absl/base/internal/unscaledcycleclock.h | 118 + absl/base/invoke_test.cc | 199 ++ absl/base/macros.h | 201 ++ absl/base/optimization.h | 164 + absl/base/policy_checks.h | 99 + absl/base/port.h | 26 + absl/base/raw_logging_test.cc | 50 + absl/base/spinlock_test_common.cc | 265 ++ absl/base/thread_annotations.h | 247 ++ absl/base/throw_delegate_test.cc | 95 + absl/container/BUILD.bazel | 124 + absl/container/fixed_array.h | 493 +++ absl/container/fixed_array_test.cc | 621 ++++ absl/container/inlined_vector.h | 1330 ++++++++ absl/container/inlined_vector_test.cc | 1593 ++++++++++ .../internal/test_instance_tracker.cc | 26 + .../internal/test_instance_tracker.h | 220 ++ .../internal/test_instance_tracker_test.cc | 160 + absl/copts.bzl | 138 + 
absl/debugging/BUILD.bazel | 170 ++ .../debugging/internal/address_is_readable.cc | 134 + absl/debugging/internal/address_is_readable.h | 29 + absl/debugging/internal/elf_mem_image.cc | 397 +++ absl/debugging/internal/elf_mem_image.h | 125 + .../internal/stacktrace_aarch64-inl.inc | 181 ++ .../debugging/internal/stacktrace_arm-inl.inc | 115 + absl/debugging/internal/stacktrace_config.h | 76 + .../internal/stacktrace_generic-inl.inc | 51 + .../internal/stacktrace_libunwind-inl.inc | 128 + .../internal/stacktrace_powerpc-inl.inc | 234 ++ .../internal/stacktrace_unimplemented-inl.inc | 14 + .../internal/stacktrace_win32-inl.inc | 75 + .../debugging/internal/stacktrace_x86-inl.inc | 327 ++ absl/debugging/internal/vdso_support.cc | 177 ++ absl/debugging/internal/vdso_support.h | 155 + absl/debugging/leak_check.cc | 35 + absl/debugging/leak_check.h | 111 + absl/debugging/leak_check_disable.cc | 20 + absl/debugging/leak_check_fail_test.cc | 41 + absl/debugging/leak_check_test.cc | 41 + absl/debugging/stacktrace.cc | 133 + absl/debugging/stacktrace.h | 160 + absl/memory/BUILD.bazel | 47 + absl/memory/README.md | 22 + absl/memory/memory.h | 622 ++++ absl/memory/memory_test.cc | 590 ++++ absl/meta/BUILD.bazel | 29 + absl/meta/type_traits.h | 314 ++ absl/meta/type_traits_test.cc | 640 ++++ absl/numeric/BUILD.bazel | 39 + absl/numeric/int128.cc | 200 ++ absl/numeric/int128.h | 651 ++++ absl/numeric/int128_have_intrinsic.inc | 3 + absl/numeric/int128_no_intrinsic.inc | 3 + absl/numeric/int128_test.cc | 492 +++ absl/strings/BUILD.bazel | 293 ++ absl/strings/README.md | 87 + absl/strings/ascii.cc | 198 ++ absl/strings/ascii.h | 239 ++ absl/strings/ascii_ctype.h | 66 + absl/strings/ascii_test.cc | 354 +++ absl/strings/escaping.cc | 1093 +++++++ absl/strings/escaping.h | 158 + absl/strings/escaping_test.cc | 638 ++++ absl/strings/internal/char_map.h | 154 + absl/strings/internal/char_map_test.cc | 172 ++ .../strings/internal/escaping_test_common.inc | 113 + absl/strings/internal/fastmem.h | 215 ++ absl/strings/internal/fastmem_test.cc | 453 +++ absl/strings/internal/memutil.cc | 110 + absl/strings/internal/memutil.h | 146 + absl/strings/internal/memutil_test.cc | 180 ++ absl/strings/internal/numbers_test_common.inc | 166 + absl/strings/internal/ostringstream.h | 97 + absl/strings/internal/ostringstream_test.cc | 103 + absl/strings/internal/resize_uninitialized.h | 69 + .../internal/resize_uninitialized_test.cc | 68 + absl/strings/internal/str_join_internal.h | 314 ++ absl/strings/internal/str_split_internal.h | 439 +++ absl/strings/internal/utf8.cc | 51 + absl/strings/internal/utf8.h | 52 + absl/strings/internal/utf8_test.cc | 58 + absl/strings/match.cc | 40 + absl/strings/match.h | 81 + absl/strings/match_test.cc | 99 + absl/strings/numbers.cc | 1288 ++++++++ absl/strings/numbers.h | 173 ++ absl/strings/numbers_test.cc | 1186 ++++++++ absl/strings/str_cat.cc | 208 ++ absl/strings/str_cat.h | 348 +++ absl/strings/str_cat_test.cc | 462 +++ absl/strings/str_join.h | 288 ++ absl/strings/str_join_test.cc | 474 +++ absl/strings/str_replace.cc | 79 + absl/strings/str_replace.h | 213 ++ absl/strings/str_replace_test.cc | 340 +++ absl/strings/str_split.cc | 133 + absl/strings/str_split.h | 511 ++++ absl/strings/str_split_test.cc | 896 ++++++ absl/strings/string_view.cc | 248 ++ absl/strings/string_view.h | 572 ++++ absl/strings/string_view_test.cc | 1097 +++++++ absl/strings/strip.cc | 269 ++ absl/strings/strip.h | 89 + absl/strings/strip_test.cc | 119 + absl/strings/substitute.cc | 117 + absl/strings/substitute.h 
| 674 +++++ absl/strings/substitute_test.cc | 168 ++ absl/strings/testdata/getline-1.txt | 3 + absl/strings/testdata/getline-2.txt | 1 + absl/synchronization/BUILD.bazel | 178 ++ absl/synchronization/barrier.cc | 50 + absl/synchronization/barrier.h | 77 + absl/synchronization/blocking_counter.cc | 53 + absl/synchronization/blocking_counter.h | 96 + absl/synchronization/blocking_counter_test.cc | 67 + .../internal/create_thread_identity.cc | 110 + .../internal/create_thread_identity.h | 53 + absl/synchronization/internal/graphcycles.cc | 709 +++++ absl/synchronization/internal/graphcycles.h | 136 + .../internal/graphcycles_test.cc | 471 +++ .../synchronization/internal/kernel_timeout.h | 147 + .../synchronization/internal/mutex_nonprod.cc | 311 ++ .../internal/mutex_nonprod.inc | 256 ++ .../internal/per_thread_sem.cc | 106 + .../synchronization/internal/per_thread_sem.h | 107 + .../internal/per_thread_sem_test.cc | 246 ++ absl/synchronization/internal/thread_pool.h | 90 + absl/synchronization/internal/waiter.cc | 394 +++ absl/synchronization/internal/waiter.h | 138 + absl/synchronization/mutex.cc | 2680 +++++++++++++++++ absl/synchronization/mutex.h | 1013 +++++++ absl/synchronization/mutex_test.cc | 1538 ++++++++++ absl/synchronization/notification.cc | 84 + absl/synchronization/notification.h | 112 + absl/synchronization/notification_test.cc | 124 + absl/test_dependencies.bzl | 40 + absl/time/BUILD.bazel | 112 + absl/time/clock.cc | 547 ++++ absl/time/clock.h | 72 + absl/time/clock_test.cc | 70 + absl/time/duration.cc | 864 ++++++ absl/time/duration_test.cc | 1530 ++++++++++ absl/time/format.cc | 140 + absl/time/format_test.cc | 430 +++ absl/time/internal/get_current_time_ios.inc | 80 + absl/time/internal/get_current_time_posix.inc | 22 + .../internal/get_current_time_windows.inc | 17 + absl/time/internal/test_util.cc | 112 + absl/time/internal/test_util.h | 49 + absl/time/internal/zoneinfo.inc | 729 +++++ absl/time/time.cc | 370 +++ absl/time/time.h | 1181 ++++++++ absl/time/time_norm_test.cc | 306 ++ absl/time/time_test.cc | 1027 +++++++ absl/time/time_zone_test.cc | 95 + absl/types/BUILD.bazel | 178 ++ absl/types/any.h | 539 ++++ absl/types/any_test.cc | 713 +++++ absl/types/bad_any_cast.cc | 40 + absl/types/bad_any_cast.h | 44 + absl/types/bad_optional_access.cc | 42 + absl/types/bad_optional_access.h | 37 + absl/types/optional.cc | 24 + absl/types/optional.h | 1092 +++++++ absl/types/optional_test.cc | 1539 ++++++++++ absl/types/span.h | 738 +++++ absl/types/span_test.cc | 783 +++++ absl/utility/BUILD.bazel | 34 + absl/utility/utility.cc | 27 + absl/utility/utility.h | 177 ++ absl/utility/utility_test.cc | 163 + 238 files changed, 65475 insertions(+) create mode 100644 ABSEIL_ISSUE_TEMPLATE.md create mode 100644 AUTHORS create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 WORKSPACE create mode 100644 absl/BUILD.bazel create mode 100644 absl/algorithm/BUILD.bazel create mode 100644 absl/algorithm/algorithm.h create mode 100644 absl/algorithm/algorithm_test.cc create mode 100644 absl/algorithm/container.h create mode 100644 absl/algorithm/container_test.cc create mode 100644 absl/base/BUILD.bazel create mode 100644 absl/base/attributes.h create mode 100644 absl/base/bit_cast_test.cc create mode 100644 absl/base/call_once.h create mode 100644 absl/base/call_once_test.cc create mode 100644 absl/base/casts.h create mode 100644 absl/base/config.h create mode 100644 absl/base/config_test.cc create mode 100644 
absl/base/dynamic_annotations.cc create mode 100644 absl/base/dynamic_annotations.h create mode 100644 absl/base/internal/atomic_hook.h create mode 100644 absl/base/internal/cycleclock.cc create mode 100644 absl/base/internal/cycleclock.h create mode 100644 absl/base/internal/endian.h create mode 100644 absl/base/internal/endian_test.cc create mode 100644 absl/base/internal/exception_testing.h create mode 100644 absl/base/internal/identity.h create mode 100644 absl/base/internal/invoke.h create mode 100644 absl/base/internal/log_severity.cc create mode 100644 absl/base/internal/log_severity.h create mode 100644 absl/base/internal/low_level_alloc.cc create mode 100644 absl/base/internal/low_level_alloc.h create mode 100644 absl/base/internal/low_level_alloc_test.cc create mode 100644 absl/base/internal/low_level_scheduling.h create mode 100644 absl/base/internal/malloc_extension.cc create mode 100644 absl/base/internal/malloc_extension.h create mode 100644 absl/base/internal/malloc_extension_c.h create mode 100644 absl/base/internal/malloc_extension_test.cc create mode 100644 absl/base/internal/malloc_hook.cc create mode 100644 absl/base/internal/malloc_hook.h create mode 100644 absl/base/internal/malloc_hook_c.h create mode 100644 absl/base/internal/malloc_hook_invoke.h create mode 100644 absl/base/internal/malloc_hook_mmap_linux.inc create mode 100644 absl/base/internal/per_thread_tls.h create mode 100644 absl/base/internal/raw_logging.cc create mode 100644 absl/base/internal/raw_logging.h create mode 100644 absl/base/internal/scheduling_mode.h create mode 100644 absl/base/internal/spinlock.cc create mode 100644 absl/base/internal/spinlock.h create mode 100644 absl/base/internal/spinlock_posix.inc create mode 100644 absl/base/internal/spinlock_wait.cc create mode 100644 absl/base/internal/spinlock_wait.h create mode 100644 absl/base/internal/spinlock_win32.inc create mode 100644 absl/base/internal/sysinfo.cc create mode 100644 absl/base/internal/sysinfo.h create mode 100644 absl/base/internal/sysinfo_test.cc create mode 100644 absl/base/internal/thread_identity.cc create mode 100644 absl/base/internal/thread_identity.h create mode 100644 absl/base/internal/thread_identity_test.cc create mode 100644 absl/base/internal/throw_delegate.cc create mode 100644 absl/base/internal/throw_delegate.h create mode 100644 absl/base/internal/tsan_mutex_interface.h create mode 100644 absl/base/internal/unaligned_access.h create mode 100644 absl/base/internal/unscaledcycleclock.cc create mode 100644 absl/base/internal/unscaledcycleclock.h create mode 100644 absl/base/invoke_test.cc create mode 100644 absl/base/macros.h create mode 100644 absl/base/optimization.h create mode 100644 absl/base/policy_checks.h create mode 100644 absl/base/port.h create mode 100644 absl/base/raw_logging_test.cc create mode 100644 absl/base/spinlock_test_common.cc create mode 100644 absl/base/thread_annotations.h create mode 100644 absl/base/throw_delegate_test.cc create mode 100644 absl/container/BUILD.bazel create mode 100644 absl/container/fixed_array.h create mode 100644 absl/container/fixed_array_test.cc create mode 100644 absl/container/inlined_vector.h create mode 100644 absl/container/inlined_vector_test.cc create mode 100644 absl/container/internal/test_instance_tracker.cc create mode 100644 absl/container/internal/test_instance_tracker.h create mode 100644 absl/container/internal/test_instance_tracker_test.cc create mode 100644 absl/copts.bzl create mode 100644 absl/debugging/BUILD.bazel create mode 100644 
absl/debugging/internal/address_is_readable.cc create mode 100644 absl/debugging/internal/address_is_readable.h create mode 100644 absl/debugging/internal/elf_mem_image.cc create mode 100644 absl/debugging/internal/elf_mem_image.h create mode 100644 absl/debugging/internal/stacktrace_aarch64-inl.inc create mode 100644 absl/debugging/internal/stacktrace_arm-inl.inc create mode 100644 absl/debugging/internal/stacktrace_config.h create mode 100644 absl/debugging/internal/stacktrace_generic-inl.inc create mode 100644 absl/debugging/internal/stacktrace_libunwind-inl.inc create mode 100644 absl/debugging/internal/stacktrace_powerpc-inl.inc create mode 100644 absl/debugging/internal/stacktrace_unimplemented-inl.inc create mode 100644 absl/debugging/internal/stacktrace_win32-inl.inc create mode 100644 absl/debugging/internal/stacktrace_x86-inl.inc create mode 100644 absl/debugging/internal/vdso_support.cc create mode 100644 absl/debugging/internal/vdso_support.h create mode 100644 absl/debugging/leak_check.cc create mode 100644 absl/debugging/leak_check.h create mode 100644 absl/debugging/leak_check_disable.cc create mode 100644 absl/debugging/leak_check_fail_test.cc create mode 100644 absl/debugging/leak_check_test.cc create mode 100644 absl/debugging/stacktrace.cc create mode 100644 absl/debugging/stacktrace.h create mode 100644 absl/memory/BUILD.bazel create mode 100644 absl/memory/README.md create mode 100644 absl/memory/memory.h create mode 100644 absl/memory/memory_test.cc create mode 100644 absl/meta/BUILD.bazel create mode 100644 absl/meta/type_traits.h create mode 100644 absl/meta/type_traits_test.cc create mode 100644 absl/numeric/BUILD.bazel create mode 100644 absl/numeric/int128.cc create mode 100644 absl/numeric/int128.h create mode 100644 absl/numeric/int128_have_intrinsic.inc create mode 100644 absl/numeric/int128_no_intrinsic.inc create mode 100644 absl/numeric/int128_test.cc create mode 100644 absl/strings/BUILD.bazel create mode 100644 absl/strings/README.md create mode 100644 absl/strings/ascii.cc create mode 100644 absl/strings/ascii.h create mode 100644 absl/strings/ascii_ctype.h create mode 100644 absl/strings/ascii_test.cc create mode 100644 absl/strings/escaping.cc create mode 100644 absl/strings/escaping.h create mode 100644 absl/strings/escaping_test.cc create mode 100644 absl/strings/internal/char_map.h create mode 100644 absl/strings/internal/char_map_test.cc create mode 100644 absl/strings/internal/escaping_test_common.inc create mode 100644 absl/strings/internal/fastmem.h create mode 100644 absl/strings/internal/fastmem_test.cc create mode 100644 absl/strings/internal/memutil.cc create mode 100644 absl/strings/internal/memutil.h create mode 100644 absl/strings/internal/memutil_test.cc create mode 100644 absl/strings/internal/numbers_test_common.inc create mode 100644 absl/strings/internal/ostringstream.h create mode 100644 absl/strings/internal/ostringstream_test.cc create mode 100644 absl/strings/internal/resize_uninitialized.h create mode 100644 absl/strings/internal/resize_uninitialized_test.cc create mode 100644 absl/strings/internal/str_join_internal.h create mode 100644 absl/strings/internal/str_split_internal.h create mode 100644 absl/strings/internal/utf8.cc create mode 100644 absl/strings/internal/utf8.h create mode 100644 absl/strings/internal/utf8_test.cc create mode 100644 absl/strings/match.cc create mode 100644 absl/strings/match.h create mode 100644 absl/strings/match_test.cc create mode 100644 absl/strings/numbers.cc create mode 100644 
absl/strings/numbers.h create mode 100644 absl/strings/numbers_test.cc create mode 100644 absl/strings/str_cat.cc create mode 100644 absl/strings/str_cat.h create mode 100644 absl/strings/str_cat_test.cc create mode 100644 absl/strings/str_join.h create mode 100644 absl/strings/str_join_test.cc create mode 100644 absl/strings/str_replace.cc create mode 100644 absl/strings/str_replace.h create mode 100644 absl/strings/str_replace_test.cc create mode 100644 absl/strings/str_split.cc create mode 100644 absl/strings/str_split.h create mode 100644 absl/strings/str_split_test.cc create mode 100644 absl/strings/string_view.cc create mode 100644 absl/strings/string_view.h create mode 100644 absl/strings/string_view_test.cc create mode 100644 absl/strings/strip.cc create mode 100644 absl/strings/strip.h create mode 100644 absl/strings/strip_test.cc create mode 100644 absl/strings/substitute.cc create mode 100644 absl/strings/substitute.h create mode 100644 absl/strings/substitute_test.cc create mode 100644 absl/strings/testdata/getline-1.txt create mode 100644 absl/strings/testdata/getline-2.txt create mode 100644 absl/synchronization/BUILD.bazel create mode 100644 absl/synchronization/barrier.cc create mode 100644 absl/synchronization/barrier.h create mode 100644 absl/synchronization/blocking_counter.cc create mode 100644 absl/synchronization/blocking_counter.h create mode 100644 absl/synchronization/blocking_counter_test.cc create mode 100644 absl/synchronization/internal/create_thread_identity.cc create mode 100644 absl/synchronization/internal/create_thread_identity.h create mode 100644 absl/synchronization/internal/graphcycles.cc create mode 100644 absl/synchronization/internal/graphcycles.h create mode 100644 absl/synchronization/internal/graphcycles_test.cc create mode 100644 absl/synchronization/internal/kernel_timeout.h create mode 100644 absl/synchronization/internal/mutex_nonprod.cc create mode 100644 absl/synchronization/internal/mutex_nonprod.inc create mode 100644 absl/synchronization/internal/per_thread_sem.cc create mode 100644 absl/synchronization/internal/per_thread_sem.h create mode 100644 absl/synchronization/internal/per_thread_sem_test.cc create mode 100644 absl/synchronization/internal/thread_pool.h create mode 100644 absl/synchronization/internal/waiter.cc create mode 100644 absl/synchronization/internal/waiter.h create mode 100644 absl/synchronization/mutex.cc create mode 100644 absl/synchronization/mutex.h create mode 100644 absl/synchronization/mutex_test.cc create mode 100644 absl/synchronization/notification.cc create mode 100644 absl/synchronization/notification.h create mode 100644 absl/synchronization/notification_test.cc create mode 100644 absl/test_dependencies.bzl create mode 100644 absl/time/BUILD.bazel create mode 100644 absl/time/clock.cc create mode 100644 absl/time/clock.h create mode 100644 absl/time/clock_test.cc create mode 100644 absl/time/duration.cc create mode 100644 absl/time/duration_test.cc create mode 100644 absl/time/format.cc create mode 100644 absl/time/format_test.cc create mode 100644 absl/time/internal/get_current_time_ios.inc create mode 100644 absl/time/internal/get_current_time_posix.inc create mode 100644 absl/time/internal/get_current_time_windows.inc create mode 100644 absl/time/internal/test_util.cc create mode 100644 absl/time/internal/test_util.h create mode 100644 absl/time/internal/zoneinfo.inc create mode 100644 absl/time/time.cc create mode 100644 absl/time/time.h create mode 100644 absl/time/time_norm_test.cc create mode 100644 
absl/time/time_test.cc create mode 100644 absl/time/time_zone_test.cc create mode 100644 absl/types/BUILD.bazel create mode 100644 absl/types/any.h create mode 100644 absl/types/any_test.cc create mode 100644 absl/types/bad_any_cast.cc create mode 100644 absl/types/bad_any_cast.h create mode 100644 absl/types/bad_optional_access.cc create mode 100644 absl/types/bad_optional_access.h create mode 100644 absl/types/optional.cc create mode 100644 absl/types/optional.h create mode 100644 absl/types/optional_test.cc create mode 100644 absl/types/span.h create mode 100644 absl/types/span_test.cc create mode 100644 absl/utility/BUILD.bazel create mode 100644 absl/utility/utility.cc create mode 100644 absl/utility/utility.h create mode 100644 absl/utility/utility_test.cc diff --git a/ABSEIL_ISSUE_TEMPLATE.md b/ABSEIL_ISSUE_TEMPLATE.md new file mode 100644 index 000000000..585cdc339 --- /dev/null +++ b/ABSEIL_ISSUE_TEMPLATE.md @@ -0,0 +1,22 @@ +Please submit a new Abseil Issue using the template below: + +## [Short title of proposed API change(s)] + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- + +## Background + +[Provide the background information that is required in order to evaluate the +proposed API changes. No controversial claims should be made here. If there are +design constraints that need to be considered, they should be presented here +**along with justification for those constraints**. Linking to other docs is +good, but please keep the **pertinent information as self-contained** as +possible in this section.] + +## Proposed API Change(s) + +[Please clearly describe the API change(s) being proposed. If there are multiple +changes, please keep them clearly distinguished. When possible, **use example code +snippets to illustrate before–after API usages**. List the pros and cons. Highlight +the main questions that you want answered. Given the Abseil project compatibility +requirements, describe why the API change is safe.] diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 000000000..976d31def --- /dev/null +++ b/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of Abseil authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..c9c89bbc4 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,90 @@ +# How to Contribute to Abseil + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +NOTE: If you are new to GitHub, please start by reading the [Pull Request +how-to](https://help.github.com/articles/about-pull-requests/). + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to https://cla.developers.google.com/ to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again.
+ +## Coding Style + +To keep the source consistent, readable, diffable and easy to merge, we use a +fairly rigid coding style, as defined by the +[google-styleguide](https://github.com/google/styleguide) project. All patches +will be expected to conform to the style outlined +[here](https://google.github.io/styleguide/cppguide.html). + +## Guidelines for Pull Requests + +* If you are a Googler, it is preferable to first create an internal CL and + have it reviewed and submitted. The code propagation process will deliver + the change to GitHub. + +* Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often receive PRs that are trying to fix several things at a + time, but if only one fix is considered acceptable, nothing gets merged and + both the author's and the reviewers' time is wasted. Create more PRs to + address different concerns and everyone will be happy. + +* For speculative changes, consider opening an [Abseil + issue](https://github.com/abseil/abseil-cpp/issues) and discussing it first. + If you are suggesting a behavioral or API change, consider starting with an + [Abseil proposal template](ABSEIL_ISSUE_TEMPLATE.md). + +* Provide a good **PR description** as a record of **what** change is being + made and **why** it was made. Link to a GitHub issue if it exists. + +* Don't fix code style and formatting unless you are already changing that + line to address an issue. PRs with irrelevant changes won't be merged. If + you do want to fix formatting or style, do that in a separate PR. + +* Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments; otherwise the PR will be closed after 2-3 + weeks of inactivity. + +* Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit history are difficult to review and won't be merged. + Use `rebase -i upstream/master` to curate your commit history and/or to + bring in the latest changes from master (but avoid rebasing in the middle of + a code review). + +* Keep your PR up to date with upstream/master (if there are merge conflicts, + we can't really merge your change). + +* **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** (see below). + +* Exceptions to the rules can be made if there's a compelling reason for doing + so. That is, the rules are here to serve us, not the other way around, and + the rules need to be serving their intended purpose to be valuable. + +* All submissions, including submissions by project members, require review. + +## Running Tests + +Use `bazel test <target pattern>` to run the unit tests. + +Prerequisites for building and running tests are listed in +[README.md](README.md). + +## Abseil Committers + +The current members of the Abseil engineering team are the only committers at +present. + +## Release Process + +Abseil lives at head, where the latest-and-greatest code can be found. diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..fef7d9678 --- /dev/null +++ b/LICENSE @@ -0,0 +1,204 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 000000000..0301f6112 --- /dev/null +++ b/README.md @@ -0,0 +1,89 @@ +# Abseil - C++ Common Libraries + +This repository contains the Abseil C++ library code. Abseil is an open-source +collection of C++ code (compliant with C++11) designed to augment the C++ +standard library. + +## Table of Contents + +- [About Abseil](#about-abseil) +- [Codemap](#codemap) +- [License](#license) +- [Links](#links) + + +## About Abseil + +Abseil is an open-source collection of C++ library code designed to augment +the C++ standard library. The Abseil library code is collected from Google's +own C++ code base, has been extensively tested and used in production, and +is the same code we depend on in our daily coding lives. + +In some cases, Abseil provides pieces missing from the C++ standard; in +others, Abseil provides alternatives to the standard for special needs +we've found through usage in the Google code base. We denote those cases +clearly within the library code we provide you. + +Abseil is not meant to be a competitor to the standard library; we've +just found that many of these utilities serve a purpose within our code +base, and we now want to provide those resources to the C++ community as +a whole. + +## Codemap + +Abseil contains the following C++ library components: + +* [`base`](base/) Abseil Fundamentals +
The `base` library contains initialization code and other code which + all other Abseil code depends on. Code within `base` may not depend on any + other code (other than the C++ standard library). +* [`algorithm`](algorithm/) +
The `algorithm` library contains additions to the C++ `<algorithm>` + library and container-based versions of such algorithms. +* [`container`](container) +
The `container` library contains additional STL-style containers. +* [`debugging`](debugging) +
The `debugging` library contains code useful for enabling leak + checks. Future updates will add stacktrace and symbolization utilities. +* [`memory`](memory) +
The `memory` library contains C++11-compatible versions of + `std::make_unique()` and related memory management facilities. +* [`meta`](meta) +
The `meta` library contains C++11-compatible versions of type checks + available within C++14 and C++17 versions of the C++ `<type_traits>` library. +* [`numeric`](numeric) +
The `numeric` library contains C++11-compatible 128-bit integers. +* [`strings`](strings) +
The `strings` library contains a variety of string routines and + utilities, including a C++11-compatible version of the C++17 + `std::string_view` type. +* [`synchronization`](synchronization) +
The `synchronization` library contains concurrency primitives (Abseil's + `absl::Mutex` class, an alternative to `std::mutex`) and a variety of + synchronization abstractions. +* [`time`](time) +
The `time` library contains abstractions for computing with absolute + points in time, durations of time, and formatting and parsing time within + time zones. +* [`types`](types) +
The `types` library contains non-container utility types, like + `absl::optional`, a C++11-compatible version of the C++17 `std::optional` type. + +## License + +The Abseil C++ library is licensed under the terms of the Apache +license. See [LICENSE](LICENSE) for more information. + +## Links + +For more information about Abseil: + +* Consult our [Abseil Introduction](http://abseil.io/about/intro) +* Read [Why Adopt Abseil](http://abseil.io/about/philosophy) to understand our + design philosophy. +* Peruse our [Abseil Project Contract](http://abseil.io/about/contract) to + understand both what we promise to you, and what we expect of you in return. + +## Disclaimer + +* This is not an official Google product. diff --git a/WORKSPACE b/WORKSPACE new file mode 100644 index 000000000..7d8efc7fc --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,22 @@ +workspace(name = "com_google_absl") +# GoogleTest/GoogleMock framework. Used by most unit-tests. +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# CCTZ (Time-zone framework). +# TODO(b/63158562): Make test and benchmark targets from here build. +http_archive( + name = "com_googlesource_code_cctz", + urls = ["https://github.com/google/cctz/archive/master.zip"], + strip_prefix = "cctz-master", +) + +# RE2 regular-expression framework. Used by some unit-tests. +http_archive( + name = "com_googlesource_code_re2", + urls = ["https://github.com/google/re2/archive/master.zip"], + strip_prefix = "re2-master", +) diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel new file mode 100644 index 000000000..42c107d23 --- /dev/null +++ b/absl/BUILD.bazel @@ -0,0 +1,62 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +config_setting( + name = "llvm_compiler", + values = { + "compiler": "llvm", + }, +) + +config_setting( + name = "hybrid_compiler", + values = { + "compiler": "hybrid", + }, +) + +config_setting( + name = "llvm_warnings", + values = { + "define": "ABSL_LLVM_WARNINGS=1", + }, +) + +# The following configs are based on the mapping defined in: https://git.io/v5Ijz +config_setting( + name = "ios", + values = { + "cpu": "darwin", + }, +) + +config_setting( + name = "windows", + values = { + "cpu": "x64_windows_msvc", + }, +) + +config_setting( + name = "ppc", + values = { + "cpu": "ppc", + }, +) diff --git a/absl/algorithm/BUILD.bazel b/absl/algorithm/BUILD.bazel new file mode 100644 index 000000000..5890bf172 --- /dev/null +++ b/absl/algorithm/BUILD.bazel @@ -0,0 +1,69 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load( + "//absl:copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_TEST_COPTS", +) +load( + "//absl:test_dependencies.bzl", + "GUNIT_MAIN_DEPS_SELECTOR", +) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "algorithm", + hdrs = ["algorithm.h"], + copts = ABSL_DEFAULT_COPTS, +) + +cc_test( + name = "algorithm_test", + size = "small", + srcs = ["algorithm_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [":algorithm"] + select(GUNIT_MAIN_DEPS_SELECTOR), +) + +cc_library( + name = "container", + hdrs = [ + "container.h", + ], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":algorithm", + "//absl/base:core_headers", + "//absl/meta:type_traits", + ], +) + +cc_test( + name = "container_test", + srcs = ["container_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":container", + "//absl/base", + "//absl/base:core_headers", + "//absl/memory", + "//absl/types:span", + ] + select(GUNIT_MAIN_DEPS_SELECTOR), +) diff --git a/absl/algorithm/algorithm.h b/absl/algorithm/algorithm.h new file mode 100644 index 000000000..341b68b02 --- /dev/null +++ b/absl/algorithm/algorithm.h @@ -0,0 +1,138 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: algorithm.h +// ----------------------------------------------------------------------------- +// +// This header file contains Google extensions to the standard C++ +// `<algorithm>` header. + +#ifndef ABSL_ALGORITHM_ALGORITHM_H_ +#define ABSL_ALGORITHM_ALGORITHM_H_ + +#include <algorithm> +#include <iterator> +#include <type_traits> + +namespace absl { + +namespace algorithm_internal { + +// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
+struct EqualTo { + template <typename T, typename U> + bool operator()(const T& a, const U& b) const { + return a == b; + } +}; + +template <typename InputIter1, typename InputIter2, typename Pred> +bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, + InputIter2 last2, Pred pred, std::input_iterator_tag, + std::input_iterator_tag) { + while (true) { + if (first1 == last1) return first2 == last2; + if (first2 == last2) return false; + if (!pred(*first1, *first2)) return false; + ++first1; + ++first2; + } +} + +template <typename InputIter1, typename InputIter2, typename Pred> +bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, + InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, + std::random_access_iterator_tag) { + return (last1 - first1 == last2 - first2) && + std::equal(first1, last1, first2, std::forward<Pred>(pred)); +} + +template <typename It> +It RotateImpl(It first, It middle, It last, std::true_type) { + return std::rotate(first, middle, last); +} + +template <typename It> +It RotateImpl(It first, It middle, It last, std::false_type) { + std::rotate(first, middle, last); + return std::next(first, std::distance(middle, last)); +} + +} // namespace algorithm_internal + +// Compares the equality of two ranges specified by pairs of iterators, using +// the given predicate, returning true iff for each corresponding iterator i1 +// and i2 in the first and second range respectively, pred(*i1, *i2) == true. +// +// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) +// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are +// both random-access iterators, and `last1` - `first1` != `last2` - `first2`, +// then the predicate is never invoked and the function returns false. +// +// This is a C++11-compatible implementation of the C++14 four-iterator +// `std::equal`. See http://en.cppreference.com/w/cpp/algorithm/equal for more +// information. +template <typename InputIter1, typename InputIter2, typename Pred> +bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, + InputIter2 last2, Pred&& pred) { + return algorithm_internal::EqualImpl( + first1, last1, first2, last2, std::forward<Pred>(pred), + typename std::iterator_traits<InputIter1>::iterator_category{}, + typename std::iterator_traits<InputIter2>::iterator_category{}); +} + +// Performs comparison of two ranges specified by pairs of iterators using +// operator==. +template <typename InputIter1, typename InputIter2> +bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, + InputIter2 last2) { + return absl::equal(first1, last1, first2, last2, + algorithm_internal::EqualTo{}); +} + +// Performs a linear search for `value` using the iterator `first` up to +// but not including `last`, returning true if [`first`, `last`) contains an +// element equal to `value`. +// +// A linear search is of O(n) complexity which is guaranteed to make at most +// n = (`last` - `first`) comparisons. A linear search over short containers +// may be faster than a binary search, even when the container is sorted. +template <typename InputIterator, typename EqualityComparable> +bool linear_search(InputIterator first, InputIterator last, + const EqualityComparable& value) { + return std::find(first, last, value) != last; +} + +// Performs a left rotation on the range of elements [`first`, `last`) such +// that `middle` is now the first element. `rotate()` returns an iterator +// pointing to the first element before rotation. This function is exactly the +// same as `std::rotate`, but fixes a bug in gcc +// <= 4.9 where `std::rotate` returns `void` instead of an iterator. +// +// The complexity of this algorithm is the same as that of `std::rotate`, but if +// `ForwardIterator` is not a random-access iterator, then `absl::rotate` +// performs an additional pass over the range to construct the return value.
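To make the documented guarantees concrete, here is a small usage sketch for `absl::equal()` and `absl::rotate()`; it is an editorial illustration based on the behavior described above (and exercised by the unit tests below), not code from the patch itself:

```cpp
#include <list>
#include <vector>

#include "absl/algorithm/algorithm.h"

int main() {
  // Two-range equal(): the lengths are compared as part of the contract, so a
  // shorter range never compares equal to a longer one, and mixed iterator
  // categories (vector vs. list) are fine.
  std::vector<int> a = {1, 2, 3};
  std::list<int> b = {1, 2, 3};
  const bool same = absl::equal(a.begin(), a.end(), b.begin(), b.end());

  // rotate(): v becomes {2, 3, 4, 0, 1}; the returned iterator points at the
  // element that was first before the rotation (the 0).
  std::vector<int> v = {0, 1, 2, 3, 4};
  auto old_first = absl::rotate(v.begin(), v.begin() + 2, v.end());

  return (same && *old_first == 0) ? 0 : 1;
}
```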
+ +template <typename ForwardIterator> +ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, + ForwardIterator last) { + return algorithm_internal::RotateImpl( + first, middle, last, + std::is_same<decltype(std::rotate(first, middle, last)), + ForwardIterator>()); +} + +} // namespace absl + +#endif // ABSL_ALGORITHM_ALGORITHM_H_ diff --git a/absl/algorithm/algorithm_test.cc b/absl/algorithm/algorithm_test.cc new file mode 100644 index 000000000..e4322bc4f --- /dev/null +++ b/absl/algorithm/algorithm_test.cc @@ -0,0 +1,182 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/algorithm/algorithm.h" + +#include <algorithm> +#include <list> +#include <vector> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace { + +TEST(EqualTest, DefaultComparisonRandomAccess) { + std::vector<int> v1{1, 2, 3}; + std::vector<int> v2 = v1; + std::vector<int> v3 = {1, 2}; + std::vector<int> v4 = {1, 2, 4}; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); +} + +TEST(EqualTest, DefaultComparison) { + std::list<int> lst1{1, 2, 3}; + std::list<int> lst2 = lst1; + std::list<int> lst3{1, 2}; + std::list<int> lst4{1, 2, 4}; + + EXPECT_TRUE(absl::equal(lst1.begin(), lst1.end(), lst2.begin(), lst2.end())); + EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst3.begin(), lst3.end())); + EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst4.begin(), lst4.end())); +} + +TEST(EqualTest, EmptyRange) { + std::vector<int> v1{1, 2, 3}; + std::vector<int> empty1; + std::vector<int> empty2; + + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end())); + EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end())); + EXPECT_TRUE( + absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end())); +} + +TEST(EqualTest, MixedIterTypes) { + std::vector<int> v1{1, 2, 3}; + std::list<int> lst1{v1.begin(), v1.end()}; + std::list<int> lst2{1, 2, 4}; + std::list<int> lst3{1, 2}; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), lst1.begin(), lst1.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst2.begin(), lst2.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst3.begin(), lst3.end())); +} + +TEST(EqualTest, MixedValueTypes) { + std::vector<int> v1{1, 2, 3}; + std::vector<unsigned> v2{1, 2, 3}; + std::vector<unsigned> v3{1, 2}; + std::vector<unsigned> v4{1, 2, 4}; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); +} + +TEST(EqualTest, WeirdIterators) { + std::vector<bool> v1{true, false}; + std::vector<bool> v2 = v1; + std::vector<bool> v3{true}; + std::vector<bool> v4{true, true, true}; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); +} + +TEST(EqualTest, CustomComparison) { + int n[] = {1, 2, 3, 4}; + std::vector<int*>
v1{&n[0], &n[1], &n[2]}; + std::vector<int*> v2 = v1; + std::vector<int*> v3{&n[0], &n[1], &n[3]}; + std::vector<int*> v4{&n[0], &n[1]}; + + auto eq = [](int* a, int* b) { return *a == *b; }; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), eq)); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), eq)); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end(), eq)); +} + +TEST(EqualTest, MoveOnlyPredicate) { + std::vector<int> v1{1, 2, 3}; + std::vector<int> v2{4, 5, 6}; + + // move-only equality predicate + struct Eq { + Eq() = default; + Eq(Eq &&) = default; + Eq(const Eq &) = delete; + Eq &operator=(const Eq &) = delete; + bool operator()(const int a, const int b) const { return a == b; } + }; + + EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v1.begin(), v1.end(), Eq())); + EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), Eq())); +} + +struct CountingTrivialPred { + int* count; + bool operator()(int, int) const { + ++*count; + return true; + } +}; + +TEST(EqualTest, RandomAccessComplexity) { + std::vector<int> v1{1, 1, 3}; + std::vector<int> v2 = v1; + std::vector<int> v3{1, 2}; + + do { + int count = 0; + absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), + CountingTrivialPred{&count}); + EXPECT_LE(count, 3); + } while (std::next_permutation(v2.begin(), v2.end())); + + int count = 0; + absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), + CountingTrivialPred{&count}); + EXPECT_EQ(count, 0); +} + +class LinearSearchTest : public testing::Test { + protected: + LinearSearchTest() : container_{1, 2, 3} {} + + static bool Is3(int n) { return n == 3; } + static bool Is4(int n) { return n == 4; } + + std::vector<int> container_; +}; + +TEST_F(LinearSearchTest, linear_search) { + EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3)); + EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4)); +} + +TEST_F(LinearSearchTest, linear_searchConst) { + const std::vector<int> *const const_container = &container_; + EXPECT_TRUE( + absl::linear_search(const_container->begin(), const_container->end(), 3)); + EXPECT_FALSE( + absl::linear_search(const_container->begin(), const_container->end(), 4)); +} + +TEST(RotateTest, Rotate) { + std::vector<int> v{0, 1, 2, 3, 4}; + EXPECT_EQ(*absl::rotate(v.begin(), v.begin() + 2, v.end()), 0); + EXPECT_THAT(v, testing::ElementsAreArray({2, 3, 4, 0, 1})); + + std::list<int> l{0, 1, 2, 3, 4}; + EXPECT_EQ(*absl::rotate(l.begin(), std::next(l.begin(), 3), l.end()), 0); + EXPECT_THAT(l, testing::ElementsAreArray({3, 4, 0, 1, 2})); +} + +} // namespace diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h new file mode 100644 index 000000000..dbdc5c842 --- /dev/null +++ b/absl/algorithm/container.h @@ -0,0 +1,1652 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+//   * Algorithmic <algorithm> functions
+//   * Algorithmic <numeric> functions
+//   * <iterator> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
+//
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<T>> gives vector<T>::const_iterator,
+// while ContainerIter<vector<T>> gives vector<T>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+template <typename C>
+using ContainerDifferenceType =
+    decltype(std::distance(std::declval<ContainerIter<C>>(),
+                           std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+    typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+//   using std::begin;
+//   using std::end;
+//   std::foo(begin(c), end(c));
+// becomes
+//   std::foo(container_algorithm_internal::begin(c),
+//            container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) { return begin(c); }
+
+template <typename C>
+ContainerIter<C> c_end(C& c) { return end(c); }
+
+}  // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of absl::linear_search() for performing a linear
+// search within a container.
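+//
+// Example (editorial sketch, not part of the original commit; `haystack` is
+// an illustrative name):
+//
+//   std::vector<int> haystack = {3, 1, 4, 1, 5};
+//   bool found = absl::c_linear_search(haystack, 4);    // true
+//   bool missing = absl::c_linear_search(haystack, 9);  // false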
+template <typename C, typename EqualityComparable>
+bool c_linear_search(const C& c, EqualityComparable&& value) {
+  return linear_search(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<EqualityComparable>(value));
+}
+
+//------------------------------------------------------------------------------
+// <iterator> algorithms
+//------------------------------------------------------------------------------
+
+// c_distance()
+//
+// Container-based version of the <iterator> `std::distance()` function to
+// return the number of elements within a container.
+template <typename C>
+container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
+    const C& c) {
+  return std::distance(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c));
+}
+
+//------------------------------------------------------------------------------
+// Non-modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_all_of()
+//
+// Container-based version of the <algorithm> `std::all_of()` function to
+// test a condition on all elements within a container.
+template <typename C, typename Pred>
+bool c_all_of(const C& c, Pred&& pred) {
+  return std::all_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_any_of()
+//
+// Container-based version of the <algorithm> `std::any_of()` function to
+// test if any element in a container fulfills a condition.
+template <typename C, typename Pred>
+bool c_any_of(const C& c, Pred&& pred) {
+  return std::any_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_none_of()
+//
+// Container-based version of the <algorithm> `std::none_of()` function to
+// test if no elements in a container fulfill a condition.
+template <typename C, typename Pred>
+bool c_none_of(const C& c, Pred&& pred) {
+  return std::none_of(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_for_each()
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+template <typename C, typename Function>
+decay_t<Function> c_for_each(C&& c, Function&& f) {
+  return std::for_each(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Function>(f));
+}
+
+// c_find()
+//
+// Container-based version of the <algorithm> `std::find()` function to find
+// the first element containing the passed value within a container value.
+template <typename C, typename T>
+container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
+  return std::find(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<T>(value));
+}
+
+// c_find_if()
+//
+// Container-based version of the <algorithm> `std::find_if()` function to
+// find the first element in a container matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
+  return std::find_if(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_find_if_not()
+//
+// Container-based version of the <algorithm> `std::find_if_not()` function to
+// find the first element in a container not matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
+                                                             Pred&& pred) {
+  return std::find_if_not(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c),
+                          std::forward<Pred>(pred));
+}
+
+// c_find_end()
+//
+// Container-based version of the <algorithm> `std::find_end()` function to
+// find the last subsequence within a container.
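+//
+// Example (editorial sketch, not part of the original commit; names are
+// illustrative):
+//
+//   std::vector<int> haystack = {1, 2, 3, 1, 2, 3};
+//   std::vector<int> needle = {2, 3};
+//   // `it` points at the last occurrence of {2, 3}: haystack.begin() + 4.
+//   auto it = absl::c_find_end(haystack, needle);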
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_find_end() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+// c_find_first_of()
+//
+// Container-based version of the <algorithm> `std::find_first_of()` function
+// to find the first elements in an ordered set within a container.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
+                                                                C2& options) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options));
+}
+
+// Overload of c_find_first_of() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(
+    C1& container, C2& options, BinaryPredicate&& pred) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_adjacent_find()
+//
+// Container-based version of the <algorithm> `std::adjacent_find()` function
+// to find equal adjacent elements within a container.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_adjacent_find() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence, BinaryPredicate&& pred) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_count()
+//
+// Container-based version of the <algorithm> `std::count()` function to count
+// values that match within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count(
+    const C& c, T&& value) {
+  return std::count(container_algorithm_internal::c_begin(c),
+                    container_algorithm_internal::c_end(c),
+                    std::forward<T>(value));
+}
+
+// c_count_if()
+//
+// Container-based version of the <algorithm> `std::count_if()` function to
+// count values matching a condition within a container.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
+    const C& c, Pred&& pred) {
+  return std::count_if(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Pred>(pred));
+}
+
+// c_mismatch()
+//
+// Container-based version of the <algorithm> `std::mismatch()` function to
+// return the first element where two ordered containers differ.
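+//
+// Example (editorial sketch, not part of the original commit):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {1, 9, 3};
+//   auto diff = absl::c_mismatch(a, b);
+//   // *diff.first == 2 and *diff.second == 9, the first mismatched pair.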
+template <typename C1, typename C2>
+std::pair<container_algorithm_internal::ContainerIter<C1>,
+          container_algorithm_internal::ContainerIter<C2>>
+c_mismatch(C1& c1, C2& c2) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2));
+}
+
+// Overload of c_mismatch() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+std::pair<container_algorithm_internal::ContainerIter<C1>,
+          container_algorithm_internal::ContainerIter<C2>>
+c_mismatch(C1& c1, C2& c2, BinaryPredicate&& pred) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+// c_equal()
+//
+// Container-based version of the <algorithm> `std::equal()` function to
+// test whether two containers are equal.
+//
+// NOTE: the semantics of c_equal() are slightly different than those of
+// equal(): while the latter iterates over the second container only up to the
+// size of the first container, c_equal() also checks whether the container
+// sizes are equal. This better matches expectations about c_equal() based on
+// its signature.
+//
+// Example:
+//   vector<int> v1 = {1, 2, 3};
+//   vector<int> v2 = {1, 2, 3, 4};
+//   equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true
+//   c_equal(v1, v2) returns false
+
+template <typename C1, typename C2>
+bool c_equal(const C1& c1, const C2& c2) {
+  return ((c1.size() == c2.size()) &&
+          std::equal(container_algorithm_internal::c_begin(c1),
+                     container_algorithm_internal::c_end(c1),
+                     container_algorithm_internal::c_begin(c2)));
+}
+
+// Overload of c_equal() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return ((c1.size() == c2.size()) &&
+          std::equal(container_algorithm_internal::c_begin(c1),
+                     container_algorithm_internal::c_end(c1),
+                     container_algorithm_internal::c_begin(c2),
+                     std::forward<BinaryPredicate>(pred)));
+}
+
+// c_is_permutation()
+//
+// Container-based version of the <algorithm> `std::is_permutation()` function
+// to test whether a container is a permutation of another.
+template <typename C1, typename C2>
+bool c_is_permutation(const C1& c1, const C2& c2) {
+  using std::begin;
+  using std::end;
+  return c1.size() == c2.size() &&
+         std::is_permutation(begin(c1), end(c1), begin(c2));
+}
+
+// Overload of c_is_permutation() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  using std::begin;
+  using std::end;
+  return c1.size() == c2.size() &&
+         std::is_permutation(begin(c1), end(c1), begin(c2),
+                             std::forward<BinaryPredicate>(pred));
+}
+
+// c_search()
+//
+// Container-based version of the <algorithm> `std::search()` function to
+// search a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_search() for using a predicate evaluation other than
+// `==` as the function's test condition.
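+//
+// Example (editorial sketch, not part of the original commit): a
+// case-insensitive subsequence search via the predicate overload, assuming
+// <cctype> is available for std::tolower.
+//
+//   std::string text = "Abseil Rocks", word = "rocks";
+//   auto it = absl::c_search(text, word, [](char a, char b) {
+//     return std::tolower(a) == std::tolower(b);
+//   });  // `it` points at the 'R' of "Rocks"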
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence),
+                     std::forward<BinaryPredicate>(pred));
+}
+
+// c_search_n()
+//
+// Container-based version of the <algorithm> `std::search_n()` function to
+// search a container for the first sequence of N elements.
+template <typename Sequence, typename Size, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value));
+}
+
+// Overload of c_search_n() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename Size, typename T,
+          typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+//------------------------------------------------------------------------------
+// Modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_copy()
+//
+// Container-based version of the <algorithm> `std::copy()` function to copy a
+// container's elements into an iterator.
+template <typename InputSequence, typename OutputIterator>
+OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
+  return std::copy(container_algorithm_internal::c_begin(input),
+                   container_algorithm_internal::c_end(input), output);
+}
+
+// c_copy_n()
+//
+// Container-based version of the <algorithm> `std::copy_n()` function to copy
+// a container's first N elements into an iterator.
+template <typename C, typename Size, typename OutputIterator>
+OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
+  return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
+}
+
+// c_copy_if()
+//
+// Container-based version of the <algorithm> `std::copy_if()` function to copy
+// a container's elements satisfying some condition into an iterator.
+template <typename InputSequence, typename OutputIterator, typename Pred>
+OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
+                         Pred&& pred) {
+  return std::copy_if(container_algorithm_internal::c_begin(input),
+                      container_algorithm_internal::c_end(input), output,
+                      std::forward<Pred>(pred));
+}
+
+// c_copy_backward()
+//
+// Container-based version of the <algorithm> `std::copy_backward()` function
+// to copy a container's elements in reverse order into an iterator.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_copy_backward(const C& src,
+                                      BidirectionalIterator dest) {
+  return std::copy_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move()
+//
+// Container-based version of the <algorithm> `std::move()` function to move
+// a container's elements into an iterator.
+template <typename C, typename OutputIterator>
+OutputIterator c_move(C& src, OutputIterator dest) {
+  return std::move(container_algorithm_internal::c_begin(src),
+                   container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move_backward()
+//
+// Container-based version of the <algorithm> `std::move_backward()` function
+// to move a container's elements into an iterator in reverse order.
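+//
+// Example (editorial sketch, not part of the original commit):
+//
+//   std::vector<std::string> src = {"a", "b", "c"};
+//   std::vector<std::string> dst(3);
+//   absl::c_move_backward(src, dst.end());
+//   // dst == {"a", "b", "c"}; src's strings are left in moved-from states.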
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_move_backward(C& src, BidirectionalIterator dest) {
+  return std::move_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_swap_ranges()
+//
+// Container-based version of the <algorithm> `std::swap_ranges()` function to
+// swap a container's elements with another container's elements.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
+  return std::swap_ranges(container_algorithm_internal::c_begin(c1),
+                          container_algorithm_internal::c_end(c1),
+                          container_algorithm_internal::c_begin(c2));
+}
+
+// c_transform()
+//
+// Container-based version of the <algorithm> `std::transform()` function to
+// transform a container's elements using the unary operation, storing the
+// result in an iterator pointing to the last transformed element in the output
+// range.
+template <typename InputSequence, typename OutputIterator, typename UnaryOp>
+OutputIterator c_transform(const InputSequence& input, OutputIterator output,
+                           UnaryOp&& unary_op) {
+  return std::transform(container_algorithm_internal::c_begin(input),
+                        container_algorithm_internal::c_end(input), output,
+                        std::forward<UnaryOp>(unary_op));
+}
+
+// Overload of c_transform() for performing a transformation using a binary
+// predicate.
+template <typename InputSequence1, typename InputSequence2,
+          typename OutputIterator, typename BinaryOp>
+OutputIterator c_transform(const InputSequence1& input1,
+                           const InputSequence2& input2, OutputIterator output,
+                           BinaryOp&& binary_op) {
+  return std::transform(container_algorithm_internal::c_begin(input1),
+                        container_algorithm_internal::c_end(input1),
+                        container_algorithm_internal::c_begin(input2), output,
+                        std::forward<BinaryOp>(binary_op));
+}
+
+// c_replace()
+//
+// Container-based version of the <algorithm> `std::replace()` function to
+// replace a container's elements of some value with a new value. The container
+// is modified in place.
+template <typename Sequence, typename T>
+void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
+  std::replace(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence), old_value,
+               new_value);
+}
+
+// c_replace_if()
+//
+// Container-based version of the <algorithm> `std::replace_if()` function to
+// replace a container's elements of some value with a new value based on some
+// condition. The container is modified in place.
+template <typename C, typename Pred, typename T>
+void c_replace_if(C& c, Pred&& pred, T&& new_value) {
+  std::replace_if(container_algorithm_internal::c_begin(c),
+                  container_algorithm_internal::c_end(c),
+                  std::forward<Pred>(pred), std::forward<T>(new_value));
+}
+
+// c_replace_copy()
+//
+// Container-based version of the <algorithm> `std::replace_copy()` function to
+// replace a container's elements of some value with a new value and return the
+// results within an iterator.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
+                              T&& new_value) {
+  return std::replace_copy(container_algorithm_internal::c_begin(c),
+                           container_algorithm_internal::c_end(c), result,
+                           std::forward<T>(old_value),
+                           std::forward<T>(new_value));
+}
+
+// c_replace_copy_if()
+//
+// Container-based version of the <algorithm> `std::replace_copy_if()` function
+// to replace a container's elements of some value with a new value based on
+// some condition, and return the results within an iterator.
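+//
+// Example (editorial sketch, not part of the original commit): copy `v` into
+// `out`, replacing odd values with 0 along the way.
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   std::vector<int> out;
+//   absl::c_replace_copy_if(v, std::back_inserter(out),
+//                           [](int n) { return n % 2 != 0; }, 0);
+//   // out == {0, 2, 0, 4}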
+template <typename C, typename OutputIterator, typename Pred, typename T>
+OutputIterator c_replace_copy_if(const C& c, OutputIterator result,
+                                 Pred&& pred, T&& new_value) {
+  return std::replace_copy_if(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c), result,
+                              std::forward<Pred>(pred),
+                              std::forward<T>(new_value));
+}
+
+// c_fill()
+//
+// Container-based version of the <algorithm> `std::fill()` function to fill a
+// container with some value.
+template <typename C, typename T>
+void c_fill(C& c, T&& value) {
+  std::fill(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c), std::forward<T>(value));
+}
+
+// c_fill_n()
+//
+// Container-based version of the <algorithm> `std::fill_n()` function to fill
+// the first N elements in a container with some value.
+template <typename C, typename Size, typename T>
+void c_fill_n(C& c, Size n, T&& value) {
+  std::fill_n(container_algorithm_internal::c_begin(c), n,
+              std::forward<T>(value));
+}
+
+// c_generate()
+//
+// Container-based version of the <algorithm> `std::generate()` function to
+// assign a container's elements to the values provided by the given generator.
+template <typename C, typename Generator>
+void c_generate(C& c, Generator&& gen) {
+  std::generate(container_algorithm_internal::c_begin(c),
+                container_algorithm_internal::c_end(c),
+                std::forward<Generator>(gen));
+}
+
+// c_generate_n()
+//
+// Container-based version of the <algorithm> `std::generate_n()` function to
+// assign a container's first N elements to the values provided by the given
+// generator.
+template <typename C, typename Size, typename Generator>
+container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
+                                                            Generator&& gen) {
+  return std::generate_n(container_algorithm_internal::c_begin(c), n,
+                         std::forward<Generator>(gen));
+}
+
+// Note: `c_xx()` container versions for `remove()`, `remove_if()`,
+// and `unique()` are omitted, because it's not clear whether or not such
+// functions should call erase() on their supplied sequences afterwards. Either
+// behavior would be surprising for a different set of users.
+//
+
+// c_remove_copy()
+//
+// Container-based version of the <algorithm> `std::remove_copy()` function to
+// copy a container's elements while removing any elements matching the given
+// `value`.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) {
+  return std::remove_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          std::forward<T>(value));
+}
+
+// c_remove_copy_if()
+//
+// Container-based version of the <algorithm> `std::remove_copy_if()` function
+// to copy a container's elements while removing any elements matching the
+// given condition.
+template <typename C, typename OutputIterator, typename Pred>
+OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
+                                Pred&& pred) {
+  return std::remove_copy_if(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), result,
+                             std::forward<Pred>(pred));
+}
+
+// c_unique_copy()
+//
+// Container-based version of the <algorithm> `std::unique_copy()` function to
+// copy a container's elements while removing any elements containing duplicate
+// values.
+template <typename C, typename OutputIterator>
+OutputIterator c_unique_copy(const C& c, OutputIterator result) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result);
+}
+
+// Overload of c_unique_copy() for using a predicate evaluation other than
+// `==` for comparing uniqueness of the element values.
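+//
+// Example (editorial sketch, not part of the original commit): collapse runs
+// of equal values, comparing case-insensitively (assumes <cctype>).
+//
+//   std::string s = "aAbbbC";
+//   std::string out;
+//   absl::c_unique_copy(s, std::back_inserter(out), [](char a, char b) {
+//     return std::tolower(a) == std::tolower(b);
+//   });
+//   // out == "abC" (the first element of each run is kept)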
+template <typename C, typename OutputIterator, typename BinaryPredicate>
+OutputIterator c_unique_copy(const C& c, OutputIterator result,
+                             BinaryPredicate&& pred) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          std::forward<BinaryPredicate>(pred));
+}
+
+// c_reverse()
+//
+// Container-based version of the <algorithm> `std::reverse()` function to
+// reverse a container's elements.
+template <typename Sequence>
+void c_reverse(Sequence& sequence) {
+  std::reverse(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence));
+}
+
+// c_reverse_copy()
+//
+// Container-based version of the <algorithm> `std::reverse_copy()` function to
+// reverse a container's elements and write them to an iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
+  return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
+                           container_algorithm_internal::c_end(sequence),
+                           result);
+}
+
+// c_rotate()
+//
+// Container-based version of the <algorithm> `std::rotate()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in the container.
+template <typename C,
+          typename Iterator = container_algorithm_internal::ContainerIter<C>>
+Iterator c_rotate(C& sequence, Iterator middle) {
+  return absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// c_rotate_copy()
+//
+// Container-based version of the <algorithm> `std::rotate_copy()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in a new iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_rotate_copy(
+    const C& sequence,
+    container_algorithm_internal::ContainerIter<const C> middle,
+    OutputIterator result) {
+  return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
+                          middle,
+                          container_algorithm_internal::c_end(sequence),
+                          result);
+}
+
+// c_shuffle()
+//
+// Container-based version of the <algorithm> `std::shuffle()` function to
+// randomly shuffle elements within the container using a `gen()` uniform
+// random number generator.
+template <typename RandomAccessContainer, typename UniformRandomBitGenerator>
+void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) {
+  std::shuffle(container_algorithm_internal::c_begin(c),
+               container_algorithm_internal::c_end(c),
+               std::forward<UniformRandomBitGenerator>(gen));
+}
+
+//------------------------------------------------------------------------------
+// Partition functions
+//------------------------------------------------------------------------------
+
+// c_is_partitioned()
+//
+// Container-based version of the <algorithm> `std::is_partitioned()` function
+// to test whether all elements in the container for which `pred` returns
+// `true` precede those for which `pred` is `false`.
+template <typename C, typename Pred>
+bool c_is_partitioned(const C& c, Pred&& pred) {
+  return std::is_partitioned(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Pred>(pred));
+}
+
+// c_partition()
+//
+// Container-based version of the <algorithm> `std::partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// returning an iterator to the first element of the second group.
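+//
+// Example (editorial sketch, not part of the original commit):
+//
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+//   auto mid = absl::c_partition(v, [](int n) { return n % 2 == 0; });
+//   // Even values now precede odd ones; `mid` points at the first odd value.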
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+  return std::partition(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Pred>(pred));
+}
+
+// c_stable_partition()
+//
+// Container-based version of the <algorithm> `std::stable_partition()`
+// function to rearrange all elements in a container in such a way that all
+// elements for which `pred` returns `true` precede all those for which it
+// returns `false`, preserving the relative ordering between the two groups.
+// The function returns an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_stable_partition(
+    C& c, Pred&& pred) {
+  return std::stable_partition(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Pred>(pred));
+}
+
+// c_partition_copy()
+//
+// Container-based version of the <algorithm> `std::partition_copy()` function
+// to partition a container's elements and return them into two iterators: one
+// for which `pred` returns `true`, and one for which `pred` returns `false`.
+
+template <typename C, typename OutputIterator1, typename OutputIterator2,
+          typename Pred>
+std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
+    const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
+    Pred&& pred) {
+  return std::partition_copy(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), out_true,
+                             out_false, std::forward<Pred>(pred));
+}
+
+// c_partition_point()
+//
+// Container-based version of the <algorithm> `std::partition_point()` function
+// to return the first element of an already partitioned container for which
+// the given `pred` is not `true`.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
+                                                                 Pred&& pred) {
+  return std::partition_point(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Pred>(pred));
+}
+
+//------------------------------------------------------------------------------
+// Sorting functions
+//------------------------------------------------------------------------------
+
+// c_sort()
+//
+// Container-based version of the <algorithm> `std::sort()` function
+// to sort elements in ascending order of their values.
+template <typename C>
+void c_sort(C& c) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename Compare>
+void c_sort(C& c, Compare&& comp) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c),
+            std::forward<Compare>(comp));
+}
+
+// c_stable_sort()
+//
+// Container-based version of the <algorithm> `std::stable_sort()` function
+// to sort elements in ascending order of their values, preserving the order
+// of equivalents.
+template <typename C>
+void c_stable_sort(C& c) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_stable_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C, typename Compare>
+void c_stable_sort(C& c, Compare&& comp) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<Compare>(comp));
+}
+
+// c_is_sorted()
+//
+// Container-based version of the <algorithm> `std::is_sorted()` function
+// to evaluate whether the given container is sorted in ascending order.
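+//
+// Example (editorial sketch, not part of the original commit):
+//
+//   std::vector<int> v = {1, 2, 2, 3};
+//   bool ascending = absl::c_is_sorted(v);                        // true
+//   bool descending = absl::c_is_sorted(v, std::greater<int>());  // false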
+template <typename C>
+bool c_is_sorted(const C& c) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c));
+}
+
+// c_is_sorted() overload for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename Compare>
+bool c_is_sorted(const C& c, Compare&& comp) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Compare>(comp));
+}
+
+// c_partial_sort()
+//
+// Container-based version of the <algorithm> `std::partial_sort()` function
+// to rearrange elements within a container such that elements before `middle`
+// are sorted in ascending order.
+template <typename RandomAccessContainer>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+        middle) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_partial_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
+    Compare&& comp) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence),
+                    std::forward<Compare>(comp));
+}
+
+// c_partial_sort_copy()
+//
+// Container-based version of the <algorithm> `std::partial_sort_copy()`
+// function to sort elements within a container such that elements before
+// `middle` are sorted in ascending order, and return the result within an
+// iterator.
+template <typename C, typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
+  return std::partial_sort_copy(
+      container_algorithm_internal::c_begin(sequence),
+      container_algorithm_internal::c_end(sequence),
+      container_algorithm_internal::c_begin(result),
+      container_algorithm_internal::c_end(result));
+}
+
+// Overload of c_partial_sort_copy() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename RandomAccessContainer, typename Compare>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
+                    Compare&& comp) {
+  return std::partial_sort_copy(
+      container_algorithm_internal::c_begin(sequence),
+      container_algorithm_internal::c_end(sequence),
+      container_algorithm_internal::c_begin(result),
+      container_algorithm_internal::c_end(result),
+      std::forward<Compare>(comp));
+}
+
+// c_is_sorted_until()
+//
+// Container-based version of the <algorithm> `std::is_sorted_until()` function
+// to return the first element within a container that is not sorted in
+// ascending order as an iterator.
+template <typename C>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_is_sorted_until() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename Compare>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
+    C& c, Compare&& comp) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Compare>(comp));
+}
+
+// c_nth_element()
+//
+// Container-based version of the <algorithm> `std::nth_element()` function
+// to rearrange the elements within a container such that the `nth` element
+// would be in that position in an ordered sequence; other elements may be in
+// any order, except that all preceding `nth` will be less than that element,
+// and all following `nth` will be greater than that element.
+template <typename RandomAccessContainer>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_nth_element() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
+    Compare&& comp) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence),
+                   std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Binary Search
+//------------------------------------------------------------------------------
+
+// c_lower_bound()
+//
+// Container-based version of the <algorithm> `std::lower_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which does not compare less than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, T&& value) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_lower_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, T&& value, Compare&& comp) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_upper_bound()
+//
+// Container-based version of the <algorithm> `std::upper_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which is greater than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, T&& value) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_upper_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, T&& value, Compare&& comp) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_equal_range()
+//
+// Container-based version of the <algorithm> `std::equal_range()` function
+// to return an iterator pair pointing to the first and last elements in a
+// sorted container which compare equal to `value`.
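+//
+// Example (editorial sketch, not part of the original commit; the container
+// must already be sorted):
+//
+//   std::vector<int> v = {1, 2, 2, 2, 3};
+//   auto range = absl::c_equal_range(v, 2);
+//   // range.first points at the first 2, range.second one past the last 2,
+//   // so std::distance(range.first, range.second) == 3.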
+template <typename Sequence, typename T>
+std::pair<container_algorithm_internal::ContainerIter<Sequence>,
+          container_algorithm_internal::ContainerIter<Sequence>>
+c_equal_range(Sequence& sequence, T&& value) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value));
+}
+
+// Overload of c_equal_range() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+std::pair<container_algorithm_internal::ContainerIter<Sequence>,
+          container_algorithm_internal::ContainerIter<Sequence>>
+c_equal_range(Sequence& sequence, T&& value, Compare&& comp) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<T>(value), std::forward<Compare>(comp));
+}
+
+// c_binary_search()
+//
+// Container-based version of the <algorithm> `std::binary_search()` function
+// to test if any element in the sorted container contains a value equivalent
+// to 'value'.
+template <typename Sequence, typename T>
+bool c_binary_search(Sequence&& sequence, T&& value) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<T>(value));
+}
+
+// Overload of c_binary_search() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename Compare>
+bool c_binary_search(Sequence&& sequence, T&& value, Compare&& comp) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<T>(value),
+                            std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Merge functions
+//------------------------------------------------------------------------------
+
+// c_merge()
+//
+// Container-based version of the <algorithm> `std::merge()` function
+// to merge two sorted containers into a single sorted iterator.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result);
+}
+
+// Overload of c_merge() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
+                       Compare&& comp) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result,
+                    std::forward<Compare>(comp));
+}
+
+// c_inplace_merge()
+//
+// Container-based version of the <algorithm> `std::inplace_merge()` function
+// to merge a supplied iterator `middle` into a container.
+template <typename C>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_inplace_merge() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C, typename Compare>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle,
+                     Compare&& comp) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Compare>(comp));
+}
+
+// c_includes()
+//
+// Container-based version of the <algorithm> `std::includes()` function
+// to test whether a sorted container `c1` entirely contains another sorted
+// container `c2`.
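+//
+// Example (editorial sketch, not part of the original commit; both containers
+// must already be sorted):
+//
+//   std::vector<int> big = {1, 2, 3, 4, 5};
+//   std::vector<int> small = {2, 4};
+//   bool contained = absl::c_includes(big, small);  // true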
+template <typename C1, typename C2>
+bool c_includes(const C1& c1, const C2& c2) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_includes() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename Compare>
+bool c_includes(const C1& c1, const C2& c2, Compare&& comp) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2),
+                       std::forward<Compare>(comp));
+}
+
+// c_set_union()
+//
+// Container-based version of the <algorithm> `std::set_union()` function
+// to return an iterator containing the union of two containers; duplicate
+// values are not copied into the output.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_union() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
+                           Compare&& comp) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output,
+                        std::forward<Compare>(comp));
+}
+
+// c_set_intersection()
+//
+// Container-based version of the <algorithm> `std::set_intersection()`
+// function to return an iterator containing the intersection of two
+// containers.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output) {
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2),
+                               output);
+}
+
+// Overload of c_set_intersection() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output, Compare&& comp) {
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2), output,
+                               std::forward<Compare>(comp));
+}
+
+// c_set_difference()
+//
+// Container-based version of the <algorithm> `std::set_difference()` function
+// to return an iterator containing elements present in the first container but
+// not in the second.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_difference() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output, Compare&& comp) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output,
+                             std::forward<Compare>(comp));
+}
+
+// c_set_symmetric_difference()
+//
+// Container-based version of the <algorithm> `std::set_symmetric_difference()`
+// function to return an iterator containing elements present in either one
+// container or the other, but not both.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_symmetric_difference() for performing a merge using a
+// `comp` other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename Compare>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output,
+                                          Compare&& comp) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output,
+      std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Heap functions
+//------------------------------------------------------------------------------
+
+// c_push_heap()
+//
+// Container-based version of the <algorithm> `std::push_heap()` function
+// to push a value onto a container heap.
+template <typename RandomAccessContainer>
+void c_push_heap(RandomAccessContainer& sequence) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_push_heap() for performing a push operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_push_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_pop_heap()
+//
+// Container-based version of the <algorithm> `std::pop_heap()` function
+// to pop a value from a heap container.
+template <typename RandomAccessContainer>
+void c_pop_heap(RandomAccessContainer& sequence) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_pop_heap() for performing a pop operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename Compare>
+void c_pop_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence),
+                std::forward<Compare>(comp));
+}
+
+// c_make_heap()
+//
+// Container-based version of the <algorithm> `std::make_heap()` function
+// to make a container a heap.
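+//
+// Example (editorial sketch, not part of the original commit): the usual
+// push/pop cycle on a max-heap.
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   absl::c_make_heap(v);  // v.front() == 5
+//   absl::c_pop_heap(v);   // moves 5 to v.back()
+//   v.pop_back();
+//   v.push_back(9);
+//   absl::c_push_heap(v);  // v.front() == 9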
+template <typename RandomAccessContainer>
+void c_make_heap(RandomAccessContainer& sequence) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_make_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+void c_make_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_sort_heap()
+//
+// Container-based version of the <algorithm> `std::sort_heap()` function
+// to sort a heap into ascending order (after which it is no longer a heap).
+template <typename RandomAccessContainer>
+void c_sort_heap(RandomAccessContainer& sequence) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_sort_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+void c_sort_heap(RandomAccessContainer& sequence, Compare&& comp) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<Compare>(comp));
+}
+
+// c_is_heap()
+//
+// Container-based version of the <algorithm> `std::is_heap()` function
+// to check whether the given container is a heap.
+template <typename RandomAccessContainer>
+bool c_is_heap(const RandomAccessContainer& sequence) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+bool c_is_heap(const RandomAccessContainer& sequence, Compare&& comp) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence),
+                      std::forward<Compare>(comp));
+}
+
+// c_is_heap_until()
+//
+// Container-based version of the <algorithm> `std::is_heap_until()` function
+// to find the first element in a given container which is not in heap order.
+template <typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap_until() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename Compare>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence, Compare&& comp) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Min/max
+//------------------------------------------------------------------------------
+
+// c_min_element()
+//
+// Container-based version of the <algorithm> `std::min_element()` function
+// to return an iterator pointing to the element with the smallest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_min_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence, Compare&& comp) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<Compare>(comp));
+}
+
+// c_max_element()
+//
+// Container-based version of the <algorithm> `std::max_element()` function
+// to return an iterator pointing to the element with the largest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_max_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename Compare>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence, Compare&& comp) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<Compare>(comp));
+}
+
+// c_minmax_element()
+//
+// Container-based version of the <algorithm> `std::minmax_element()` function
+// to return a pair of iterators pointing to the elements containing the
+// smallest and largest values, respectively, using `operator<` to make the
+// comparisons.
+template <typename C>
+std::pair<container_algorithm_internal::ContainerIter<C>,
+          container_algorithm_internal::ContainerIter<C>>
+c_minmax_element(C& c) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_minmax_element() for performing `comp` comparisons other than
+// `operator<`.
+template <typename C, typename Compare>
+std::pair<container_algorithm_internal::ContainerIter<C>,
+          container_algorithm_internal::ContainerIter<C>>
+c_minmax_element(C& c, Compare&& comp) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// Lexicographical Comparisons
+//------------------------------------------------------------------------------
+
+// c_lexicographical_compare()
+//
+// Container-based version of the <algorithm> `std::lexicographical_compare()`
+// function to lexicographically compare (e.g. sort words alphabetically) two
+// container sequences. The comparison is performed using `operator<`. Note
+// that capital letters ("A-Z") have ASCII values less than lowercase letters
+// ("a-z").
+template <typename Sequence1, typename Sequence2>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2));
+}
+
+// Overload of c_lexicographical_compare() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename Sequence1, typename Sequence2, typename Compare>
+bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2,
+                               Compare&& comp) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2),
+      std::forward<Compare>(comp));
+}
+
+// c_next_permutation()
+//
+// Container-based version of the <algorithm> `std::next_permutation()`
+// function to rearrange a container's elements into the next
+// lexicographically greater permutation.
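+//
+// Example (editorial sketch, not part of the original commit): visiting every
+// permutation of a sequence that starts out sorted.
+//
+//   std::vector<int> v = {1, 2, 3};
+//   do {
+//     // ... use v ...
+//   } while (absl::c_next_permutation(v));  // false once v wraps to sorted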
+template <typename C>
+bool c_next_permutation(C& c) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_next_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename Compare>
+bool c_next_permutation(C& c, Compare&& comp) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Compare>(comp));
+}
+
+// c_prev_permutation()
+//
+// Container-based version of the <algorithm> `std::prev_permutation()`
+// function to rearrange a container's elements into the next
+// lexicographically lesser permutation.
+template <typename C>
+bool c_prev_permutation(C& c) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_prev_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename Compare>
+bool c_prev_permutation(C& c, Compare&& comp) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Compare>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the <numeric> `std::iota()` function
+// to compute successive values of `value`, as if incremented with `++value`
+// after each element is written, and write them to the container.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, T&& value) {
+  std::iota(container_algorithm_internal::c_begin(sequence),
+            container_algorithm_internal::c_end(sequence),
+            std::forward<T>(value));
+}
+
+// c_accumulate()
+//
+// Container-based version of the <numeric> `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init));
+}
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition for computing the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+                        BinaryOp&& binary_op) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init),
+                         std::forward<BinaryOp>(binary_op));
+}
+
+// c_inner_product()
+//
+// Container-based version of the <numeric> `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
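+//
+// Example (editorial sketch, not part of the original commit): a plain dot
+// product, with 0.0 as the initial sum.
+//
+//   std::vector<double> x = {1.0, 2.0};
+//   std::vector<double> y = {3.0, 4.0};
+//   double dot = absl::c_inner_product(x, y, 0.0);  // 1*3 + 2*4 == 11.0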
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product between the two container's element pair).
+template <typename Sequence1, typename Sequence2, typename T,
+          typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum, BinaryOp1&& op1,
+                           BinaryOp2&& op2) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+                            std::forward<BinaryOp2>(op2));
+}
+
+// c_adjacent_difference()
+//
+// Container-based version of the <numeric> `std::adjacent_difference()`
+// function to compute the difference between each element and the one
+// preceding it and write it to an iterator.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first);
+}
+
+// Overload of c_adjacent_difference() for using a binary operation other than
+// subtraction to compute the adjacent difference.
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first, BinaryOp&& op) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first, std::forward<BinaryOp>(op));
+}
+
+// c_partial_sum()
+//
+// Container-based version of the <numeric> `std::partial_sum()` function
+// to compute the partial sum of the elements in a sequence and write them
+// to an iterator. The partial sum is the sum of all element values so far in
+// the sequence.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first);
+}
+
+// Overload of c_partial_sum() for using a binary operation other than addition
+// to compute the "partial sum".
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
+                       BinaryOp&& op) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first, std::forward<BinaryOp>(op));
+}
+
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_CONTAINER_H_
diff --git a/absl/algorithm/container_test.cc b/absl/algorithm/container_test.cc
new file mode 100644
index 000000000..093b28147
--- /dev/null
+++ b/absl/algorithm/container_test.cc
@@ -0,0 +1,1010 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "absl/algorithm/container.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/casts.h" +#include "absl/base/macros.h" +#include "absl/memory/memory.h" +#include "absl/types/span.h" + +namespace { + +using ::testing::Each; +using ::testing::ElementsAre; +using ::testing::Gt; +using ::testing::IsNull; +using ::testing::Lt; +using ::testing::Pointee; +using ::testing::Truly; +using ::testing::UnorderedElementsAre; + +// Most of these tests just check that the code compiles, not that it +// does the right thing. That's fine since the functions just forward +// to the STL implementation. +class NonMutatingTest : public testing::Test { + protected: + std::unordered_set container_ = {1, 2, 3}; + std::list sequence_ = {1, 2, 3}; + std::vector vector_ = {1, 2, 3}; + int array_[3] = {1, 2, 3}; +}; + +struct AccumulateCalls { + void operator()(int value) { + calls.push_back(value); + } + std::vector calls; +}; + +bool Predicate(int value) { return value < 3; } +bool BinPredicate(int v1, int v2) { return v1 < v2; } +bool Equals(int v1, int v2) { return v1 == v2; } +bool IsOdd(int x) { return x % 2 != 0; } + + +TEST_F(NonMutatingTest, Distance) { + EXPECT_EQ(container_.size(), absl::c_distance(container_)); + EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_)); + EXPECT_EQ(vector_.size(), absl::c_distance(vector_)); + EXPECT_EQ(ABSL_ARRAYSIZE(array_), absl::c_distance(array_)); + + // Works with a temporary argument. + EXPECT_EQ(vector_.size(), absl::c_distance(std::vector(vector_))); +} + +TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) { + // Works with classes which have custom ADL-selected overloads of std::begin + // and std::end. + std::initializer_list a = {1, 2, 3}; + std::valarray b = {1, 2, 3}; + EXPECT_EQ(3, absl::c_distance(a)); + EXPECT_EQ(3, absl::c_distance(b)); + + // It is assumed that other c_* functions use the same mechanism for + // ADL-selecting begin/end overloads. +} + +TEST_F(NonMutatingTest, ForEach) { + AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls()); + // Don't rely on the unordered_set's order. + std::sort(c.calls.begin(), c.calls.end()); + EXPECT_EQ(vector_, c.calls); + + // Works with temporary container, too. 
+  AccumulateCalls c2 =
+      absl::c_for_each(std::unordered_set<int>(container_), AccumulateCalls());
+  std::sort(c2.calls.begin(), c2.calls.end());
+  EXPECT_EQ(vector_, c2.calls);
+}
+
+TEST_F(NonMutatingTest, FindReturnsCorrectType) {
+  auto it = absl::c_find(container_, 3);
+  EXPECT_EQ(3, *it);
+  absl::c_find(absl::implicit_cast<const std::list<int>&>(sequence_), 3);
+}
+
+TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); }
+
+TEST_F(NonMutatingTest, FindIfNot) {
+  absl::c_find_if_not(container_, Predicate);
+}
+
+TEST_F(NonMutatingTest, FindEnd) {
+  absl::c_find_end(sequence_, vector_);
+  absl::c_find_end(vector_, sequence_);
+}
+
+TEST_F(NonMutatingTest, FindEndWithPredicate) {
+  absl::c_find_end(sequence_, vector_, BinPredicate);
+  absl::c_find_end(vector_, sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, FindFirstOf) {
+  absl::c_find_first_of(container_, sequence_);
+  absl::c_find_first_of(sequence_, container_);
+}
+
+TEST_F(NonMutatingTest, FindFirstOfWithPredicate) {
+  absl::c_find_first_of(container_, sequence_, BinPredicate);
+  absl::c_find_first_of(sequence_, container_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); }
+
+TEST_F(NonMutatingTest, AdjacentFindWithPredicate) {
+  absl::c_adjacent_find(sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); }
+
+TEST_F(NonMutatingTest, CountIf) {
+  EXPECT_EQ(2, absl::c_count_if(container_, Predicate));
+  const std::unordered_set<int>& const_container = container_;
+  EXPECT_EQ(2, absl::c_count_if(const_container, Predicate));
+}
+
+TEST_F(NonMutatingTest, Mismatch) {
+  absl::c_mismatch(container_, sequence_);
+  absl::c_mismatch(sequence_, container_);
+}
+
+TEST_F(NonMutatingTest, MismatchWithPredicate) {
+  absl::c_mismatch(container_, sequence_, BinPredicate);
+  absl::c_mismatch(sequence_, container_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, Equal) {
+  EXPECT_TRUE(absl::c_equal(vector_, sequence_));
+  EXPECT_TRUE(absl::c_equal(sequence_, vector_));
+
+  // Test that behavior appropriately differs from that of equal().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_equal(vector_plus, sequence_));
+  EXPECT_FALSE(absl::c_equal(sequence_, vector_plus));
+}
+
+TEST_F(NonMutatingTest, EqualWithPredicate) {
+  EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals));
+  EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals));
+
+  // Test that behavior appropriately differs from that of equal().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals));
+  EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals));
+}
+
+TEST_F(NonMutatingTest, IsPermutation) {
+  auto vector_permut_ = vector_;
+  std::next_permutation(vector_permut_.begin(), vector_permut_.end());
+  EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_));
+  EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_));
+
+  // Test that behavior appropriately differs from that of is_permutation().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_));
+  EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus));
+}
+
+TEST_F(NonMutatingTest, IsPermutationWithPredicate) {
+  auto vector_permut_ = vector_;
+  std::next_permutation(vector_permut_.begin(), vector_permut_.end());
+  EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals));
+  EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals));
+
+  // Test that behavior appropriately differs from that of is_permutation().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals));
+  EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals));
+}
+
+TEST_F(NonMutatingTest, Search) {
+  absl::c_search(sequence_, vector_);
+  absl::c_search(vector_, sequence_);
+  absl::c_search(array_, sequence_);
+}
+
+TEST_F(NonMutatingTest, SearchWithPredicate) {
+  absl::c_search(sequence_, vector_, BinPredicate);
+  absl::c_search(vector_, sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); }
+
+TEST_F(NonMutatingTest, SearchNWithPredicate) {
+  absl::c_search_n(sequence_, 3, 1, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, LowerBound) {
+  std::list<int>::iterator i = absl::c_lower_bound(sequence_, 3);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(2, std::distance(sequence_.begin(), i));
+  EXPECT_EQ(3, *i);
+}
+
+TEST_F(NonMutatingTest, LowerBoundWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::vector<int>::iterator i = absl::c_lower_bound(v, 3, std::greater<int>());
+  EXPECT_TRUE(i == v.begin());
+  EXPECT_EQ(3, *i);
+}
+
+TEST_F(NonMutatingTest, UpperBound) {
+  std::list<int>::iterator i = absl::c_upper_bound(sequence_, 1);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(1, std::distance(sequence_.begin(), i));
+  EXPECT_EQ(2, *i);
+}
+
+TEST_F(NonMutatingTest, UpperBoundWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::vector<int>::iterator i = absl::c_upper_bound(v, 1, std::greater<int>());
+  EXPECT_EQ(3, i - v.begin());
+  EXPECT_TRUE(i == v.end());
+}
+
+TEST_F(NonMutatingTest, EqualRange) {
+  std::pair<std::list<int>::iterator, std::list<int>::iterator> p =
+      absl::c_equal_range(sequence_, 2);
+  EXPECT_EQ(1, std::distance(sequence_.begin(), p.first));
+  EXPECT_EQ(2, std::distance(sequence_.begin(), p.second));
+}
+
+TEST_F(NonMutatingTest, EqualRangeArray) {
+  auto p = absl::c_equal_range(array_, 2);
+  EXPECT_EQ(1, std::distance(std::begin(array_), p.first));
+  EXPECT_EQ(2, std::distance(std::begin(array_), p.second));
+}
+
+TEST_F(NonMutatingTest, EqualRangeWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::pair<std::vector<int>::iterator, std::vector<int>::iterator> p =
+      absl::c_equal_range(v, 2, std::greater<int>());
+  EXPECT_EQ(1, std::distance(v.begin(), p.first));
+  EXPECT_EQ(2, std::distance(v.begin(), p.second));
+}
+
+TEST_F(NonMutatingTest, BinarySearch) {
+  EXPECT_TRUE(absl::c_binary_search(vector_, 2));
+  EXPECT_TRUE(absl::c_binary_search(std::vector<int>(vector_), 2));
+}
+
+TEST_F(NonMutatingTest, BinarySearchWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater<int>()));
+  EXPECT_TRUE(
+      absl::c_binary_search(std::vector<int>(v), 2, std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, MinElement) {
+  std::list<int>::iterator i = absl::c_min_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, MinElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_min_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElement) {
+  std::list<int>::iterator i = absl::c_max_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_max_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, LexicographicalCompare) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v));
+  EXPECT_TRUE(absl::c_lexicographical_compare(std::list<int>(sequence_), v));
+}
+
+TEST_F(NonMutatingTest, LexicographicalCompareWithPredicate) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_,
+                                               std::greater<int>()));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(
+      absl::c_lexicographical_compare(v, sequence_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_lexicographical_compare(
+      std::vector<int>(v), std::list<int>(sequence_), std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, Includes) {
+  std::set<int> s(vector_.begin(), vector_.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, vector_));
+}
+
+TEST_F(NonMutatingTest, IncludesWithPredicate) {
+  std::vector<int> v = {3, 2, 1};
+  std::set<int, std::greater<int>> s(v.begin(), v.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, v, std::greater<int>()));
+}
+
+class NumericMutatingTest : public testing::Test {
+ protected:
+  std::list<int> list_ = {1, 2, 3};
+  std::vector<int> output_;
+};
+
+TEST_F(NumericMutatingTest, Iota) {
+  absl::c_iota(list_, 5);
+  std::list<int> expected{5, 6, 7};
+  EXPECT_EQ(list_, expected);
+}
+
+TEST_F(NonMutatingTest, Accumulate) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOp) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
+
+TEST_F(NonMutatingTest, InnerProduct) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOps) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NonMutatingTest, InnerProductLvalueInit) {
+  int lvalue = 1000;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) {
+  int lvalue = 10;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifference) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 2 - 1, 3 - 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_),
+                                          std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 2 * 1, 3 * 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSum) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 1 + 2, 1 + 2 + 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_),
+                                  std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 1 * 2, 1 * 2 * 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NonMutatingTest, LinearSearch) {
+  EXPECT_TRUE(absl::c_linear_search(container_, 3));
+  EXPECT_FALSE(absl::c_linear_search(container_, 4));
+}
+
+TEST_F(NonMutatingTest, AllOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; }));
+  EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; }));
+}
+
+TEST_F(NonMutatingTest, AnyOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; }));
+  EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, NoneOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; }));
+  EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, MinMaxElementLess) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::less<int>());
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+TEST_F(NonMutatingTest, MinMaxElementGreater) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::greater<int>());
+  EXPECT_TRUE(p.first == vector_.begin() + 2);
+  EXPECT_TRUE(p.second == vector_.begin());
+}
+
+TEST_F(NonMutatingTest, MinMaxElementNoPredicate) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_);
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+class SortingTest : public testing::Test {
+ protected:
+  std::list<int> sorted_ = {1, 2, 3, 4};
+  std::list<int> unsorted_ = {2, 4, 1, 3};
+  std::list<int> reversed_ = {4, 3, 2, 1};
+};
+
+TEST_F(SortingTest, IsSorted) {
+  EXPECT_TRUE(absl::c_is_sorted(sorted_));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_));
+  EXPECT_FALSE(absl::c_is_sorted(reversed_));
+}
+
+TEST_F(SortingTest, IsSortedWithPredicate) {
+  EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater<int>()));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater<int>()));
+}
+
+TEST_F(SortingTest, IsSortedUntil) {
+  EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_));
+  EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater<int>()));
+}
+
+TEST_F(SortingTest, NthElement) {
+  std::vector<int> unsorted = {2, 4, 1, 3};
+  absl::c_nth_element(unsorted, unsorted.begin() + 2);
+  EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
+  absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>());
+  EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
+}
+
+TEST(MutatingTest, IsPartitioned) {
+  EXPECT_TRUE(
+      absl::c_is_partitioned(std::vector<int>{1, 3, 5, 2, 4, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{1, 2, 3, 4, 5, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{2, 4, 6, 1, 3, 5}, IsOdd));
+}
+
+TEST(MutatingTest, Partition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_partition(actual, IsOdd);
+  EXPECT_THAT(actual, Truly([](const std::vector<int>& c) {
+                return absl::c_is_partitioned(c, IsOdd);
+              }));
+}
+
+TEST(MutatingTest, StablePartition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_stable_partition(actual, IsOdd);
+  EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4));
+}
+
+TEST(MutatingTest, PartitionCopy) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  std::vector<int> odds, evens;
+  auto ends = absl::c_partition_copy(initial, back_inserter(odds),
+                                     back_inserter(evens), IsOdd);
+  *ends.first = 7;
+  *ends.second = 6;
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST(MutatingTest, PartitionPoint) {
+  const std::vector<int> initial = {1, 3, 5, 2, 4};
+  auto middle = absl::c_partition_point(initial, IsOdd);
+  EXPECT_EQ(2, *middle);
+}
+
+TEST(MutatingTest, CopyMiddle) {
+  const std::vector<int> initial = {4, -1, -2, -3, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 1, 2, 3, 5};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, ++test_list.begin());
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, test_vector.begin() + 1);
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyFrontInserter) {
+  const std::list<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::list<int> expected = {3, 2, 1, 4, 5};
+
+  std::list<int> test_list = initial;
+  absl::c_copy(input, std::front_inserter(test_list));
+  EXPECT_EQ(expected, test_list);
+}
+
+TEST(MutatingTest, CopyBackInserter) {
+  const std::vector<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 5, 1, 2, 3};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, std::back_inserter(test_list));
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, std::back_inserter(test_vector));
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyN) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {1, 2};
+  std::vector<int> actual;
+  absl::c_copy_n(initial, 2, back_inserter(actual));
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, CopyIf) {
+  const std::list<int> input = {1, 2, 3};
+  std::vector<int> output;
+  absl::c_copy_if(input, std::back_inserter(output),
+                  [](int i) { return i != 2; });
+  EXPECT_THAT(output, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, CopyBackward) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  std::vector<int> expected = {1, 2, 1, 2, 3};
+  absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end());
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Move) {
+  std::vector<std::unique_ptr<int>> src;
+  src.emplace_back(absl::make_unique<int>(1));
+  src.emplace_back(absl::make_unique<int>(2));
+  src.emplace_back(absl::make_unique<int>(3));
+  src.emplace_back(absl::make_unique<int>(4));
+  src.emplace_back(absl::make_unique<int>(5));
+
+  std::vector<std::unique_ptr<int>> dest = {};
+  absl::c_move(src, std::back_inserter(dest));
+  EXPECT_THAT(src, Each(IsNull()));
+  EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4),
+                                Pointee(5)));
+}
+
+TEST(MutatingTest, MoveBackward) {
+  std::vector<std::unique_ptr<int>> actual;
+  actual.emplace_back(absl::make_unique<int>(1));
+  actual.emplace_back(absl::make_unique<int>(2));
+  actual.emplace_back(absl::make_unique<int>(3));
+  actual.emplace_back(absl::make_unique<int>(4));
+  actual.emplace_back(absl::make_unique<int>(5));
+  auto subrange = absl::MakeSpan(actual.data(), 3);
+  absl::c_move_backward(subrange, actual.end());
+  EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2),
+                                  Pointee(3)));
+}
+
+TEST(MutatingTest, SwapRanges) {
+  std::vector<int> odds = {2, 4, 6};
+  std::vector<int> evens = {1, 3, 5};
+  absl::c_swap_ranges(odds, evens);
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST_F(NonMutatingTest, Transform) {
+  std::vector<int> x{0, 2, 4}, y, z;
+  auto end = absl::c_transform(x, back_inserter(y), std::negate<int>());
+  EXPECT_EQ(std::vector<int>({0, -2, -4}), y);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({0, -2, -4, 7}), y);
+
+  y = {1, 3, 0};
+  end = absl::c_transform(x, y, back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5, 4}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z);
+}
+
+TEST(MutatingTest, Replace) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> test_vector = initial;
+  absl::c_replace(test_vector, 1, 4);
+  EXPECT_EQ(expected, test_vector);
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_replace(test_list, 1, 4);
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+}
+
+TEST(MutatingTest, ReplaceIf) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  absl::c_replace_if(actual, IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, ReplaceCopy) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> actual;
+  absl::c_replace_copy(initial, back_inserter(actual), 1, 4);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Sort) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4));
+}
+
+TEST(MutatingTest, SortWithPredicate) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector, std::greater<int>());
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+}
+
+// For absl::c_stable_sort tests. Needs an operator< that does not cover all
+// fields so that the test can check that the sort preserves the order of
+// equal elements.
+struct Element {
+  int key;
+  int value;
+  friend bool operator<(const Element& e1, const Element& e2) {
+    return e1.key < e2.key;
+  }
+  // Make gmock print useful diagnostics.
+  friend std::ostream& operator<<(std::ostream& o, const Element& e) {
+    return o << "{" << e.key << ", " << e.value << "}";
+  }
+};
+
+MATCHER_P2(IsElement, key, value, "") {
+  return arg.key == key && arg.value == value;
+}
+
+TEST(MutatingTest, StableSort) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector);
+  EXPECT_THAT(
+      test_vector,
+      ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1),
+                  IsElement(2, 0), IsElement(2, 2)));
+}
+
+TEST(MutatingTest, StableSortWithPredicate) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) {
+    return e2 < e1;
+  });
+  EXPECT_THAT(
+      test_vector,
+      ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2),
+                  IsElement(1, 1), IsElement(1, 0)));
+}
+
+TEST(MutatingTest, ReplaceCopyIf) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  std::vector<int> actual;
+  absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Fill) {
+  std::vector<int> actual(5);
+  absl::c_fill(actual, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1));
+}
+
+TEST(MutatingTest, FillN) {
+  std::vector<int> actual(5, 0);
+  absl::c_fill_n(actual, 2, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0));
+}
+
+TEST(MutatingTest, Generate) {
+  std::vector<int> actual(5);
+  int x = 0;
+  absl::c_generate(actual, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, GenerateN) {
+  std::vector<int> actual(5, 0);
+  int x = 0;
+  absl::c_generate_n(actual, 3, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0));
+}
+
+TEST(MutatingTest, RemoveCopy) {
+  std::vector<int> actual;
+  absl::c_remove_copy(std::vector<int>{1, 2, 3}, back_inserter(actual), 2);
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, RemoveCopyIf) {
+  std::vector<int> actual;
+  absl::c_remove_copy_if(std::vector<int>{1, 2, 3}, back_inserter(actual),
+                         IsOdd);
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST(MutatingTest, UniqueCopy) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 2, 2, 3, 3, 2},
+                      back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2));
+}
+
+TEST(MutatingTest, UniqueCopyWithPredicate) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 3, -1, -2, -3, 1},
+                      back_inserter(actual),
+                      [](int x, int y) { return (x < 0) == (y < 0); });
+  EXPECT_THAT(actual, ElementsAre(1, -1, 1));
+}
+
+TEST(MutatingTest, Reverse) {
+  std::vector<int> test_vector = {1, 2, 3, 4};
+  absl::c_reverse(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+
+  std::list<int> test_list = {1, 2, 3, 4};
+  absl::c_reverse(test_list);
+  EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1));
+}
+
+TEST(MutatingTest, ReverseCopy) {
+  std::vector<int> actual;
+  absl::c_reverse_copy(std::vector<int>{1, 2, 3, 4}, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1));
+}
+
+TEST(MutatingTest, Rotate) {
+  std::vector<int> actual = {1, 2, 3, 4};
+  auto it = absl::c_rotate(actual, actual.begin() + 2);
+  EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2}));
+  EXPECT_EQ(*it, 1);
+}
+
+TEST(MutatingTest, RotateCopy) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> actual;
+  auto end =
+      absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual));
+  *end = 5;
+  EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5));
+}
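+
+// Note: absl::c_shuffle accepts any uniform random bit generator. A seeded
+// engine can be substituted for the std::random_device used below when a
+// reproducible shuffle is needed (illustrative sketch):
+//
+//   std::mt19937 gen(42);  // fixed seed => deterministic order
+//   absl::c_shuffle(actual, gen);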
+
+TEST(MutatingTest, Shuffle) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_shuffle(actual, std::random_device());
+  EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, PartialSort) {
+  std::vector<int> sequence{5, 3, 42, 0};
+  absl::c_partial_sort(sequence, sequence.begin() + 2);
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3));
+  absl::c_partial_sort(sequence, sequence.begin() + 2, std::greater<int>());
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, PartialSortCopy) {
+  const std::vector<int> initial = {5, 3, 42, 0};
+  std::vector<int> actual(2);
+  absl::c_partial_sort_copy(initial, actual);
+  EXPECT_THAT(actual, ElementsAre(0, 3));
+  absl::c_partial_sort_copy(initial, actual, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, Merge) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{1, 3, 5}, std::vector<int>{2, 4},
+                back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, MergeWithComparator) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{5, 3, 1}, std::vector<int>{4, 2},
+                back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+TEST(MutatingTest, InplaceMerge) {
+  std::vector<int> actual = {1, 3, 5, 2, 4};
+  absl::c_inplace_merge(actual, actual.begin() + 3);
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, InplaceMergeWithComparator) {
+  std::vector<int> actual = {5, 3, 1, 4, 2};
+  absl::c_inplace_merge(actual, actual.begin() + 3, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+class SetOperationsTest : public testing::Test {
+ protected:
+  std::vector<int> a_ = {1, 2, 3};
+  std::vector<int> b_ = {1, 3, 5};
+
+  std::vector<int> a_reversed_ = {3, 2, 1};
+  std::vector<int> b_reversed_ = {5, 3, 1};
+};
+
+TEST_F(SetOperationsTest, SetUnion) {
+  std::vector<int> actual;
+  absl::c_set_union(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5));
+}
+
+TEST_F(SetOperationsTest, SetUnionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual),
+                    std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1));
+}
+
+TEST_F(SetOperationsTest, SetIntersection) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
+
+TEST_F(SetOperationsTest, SetIntersectionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_reversed_, b_reversed_, back_inserter(actual),
+                           std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(3, 1));
+}
+
+TEST_F(SetOperationsTest, SetDifference) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual),
+                         std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifference) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2, 5));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_reversed_, b_reversed_,
+                                   back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 2));
+}
+
+TEST(HeapOperationsTest, WithoutComparator) {
+  std::vector<int> heap = {1, 2, 3};
+  EXPECT_FALSE(absl::c_is_heap(heap));
+  absl::c_make_heap(heap);
+  EXPECT_TRUE(absl::c_is_heap(heap));
+  heap.push_back(4);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin());
+  absl::c_push_heap(heap);
+  EXPECT_EQ(4, heap[0]);
+  absl::c_pop_heap(heap);
+  EXPECT_EQ(4, heap[3]);
+  absl::c_make_heap(heap);
+  absl::c_sort_heap(heap);
+  EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4));
+  EXPECT_FALSE(absl::c_is_heap(heap));
+}
+
+TEST(HeapOperationsTest, WithComparator) {
+  using greater = std::greater<int>;
+  std::vector<int> heap = {3, 2, 1};
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+  absl::c_make_heap(heap, greater());
+  EXPECT_TRUE(absl::c_is_heap(heap, greater()));
+  heap.push_back(0);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin());
+  absl::c_push_heap(heap, greater());
+  EXPECT_EQ(0, heap[0]);
+  absl::c_pop_heap(heap, greater());
+  EXPECT_EQ(0, heap[3]);
+  absl::c_make_heap(heap, greater());
+  absl::c_sort_heap(heap, greater());
+  EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0));
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+}
+
+TEST(MutatingTest, PermutationOperations) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> permuted = initial;
+
+  absl::c_next_permutation(permuted);
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted));
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to<int>()));
+
+  std::vector<int> permuted2 = initial;
+  absl::c_prev_permutation(permuted2, std::greater<int>());
+  EXPECT_EQ(permuted, permuted2);
+
+  absl::c_prev_permutation(permuted);
+  EXPECT_EQ(initial, permuted);
+}
+
+}  // namespace
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
new file mode 100644
index 000000000..87a6d3e65
--- /dev/null
+++ b/absl/base/BUILD.bazel
@@ -0,0 +1,369 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_TEST_COPTS",
+    "ABSL_EXCEPTIONS_FLAG",
+)
+load(
+    "//absl:test_dependencies.bzl",
+    "GUNIT_MAIN_DEPS_SELECTOR",
+    "GUNIT_MAIN_NO_LEAK_CHECK_DEPS_SELECTOR",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache 2.0
+
+# Some header files in //base are directly exported for unusual use cases,
+# and the ABSL versions must also be exported for those users.
+
+exports_files(["thread_annotations.h"])
+
+cc_library(
+    name = "spinlock_wait",
+    srcs = [
+        "internal/spinlock_posix.inc",
+        "internal/spinlock_wait.cc",
+        "internal/spinlock_win32.inc",
+    ],
+    hdrs = [
+        "internal/scheduling_mode.h",
+        "internal/spinlock_wait.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [":core_headers"],
+)
+
+cc_library(
+    name = "config",
+    hdrs = [
+        "config.h",
+        "policy_checks.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_library(
+    name = "dynamic_annotations",
+    srcs = ["dynamic_annotations.cc"],
+    hdrs = ["dynamic_annotations.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    defines = ["__CLANG_SUPPORT_DYN_ANNOTATION__"],
+)
+
+cc_library(
+    name = "core_headers",
+    hdrs = [
+        "attributes.h",
+        "macros.h",
+        "optimization.h",
+        "port.h",
+        "thread_annotations.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":config",
+        ":dynamic_annotations",
+    ],
+)
+
+cc_library(
+    name = "malloc_extension",
+    srcs = ["internal/malloc_extension.cc"],
+    hdrs = [
+        "internal/malloc_extension.h",
+        "internal/malloc_extension_c.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":core_headers",
+        ":dynamic_annotations",
+    ],
+)
+
+# malloc_extension feels like it wants to be folded into this target, but
+# malloc_internal gets special build treatment to compile at -O3, so these
+# need to stay separate.
+cc_library(
+    name = "malloc_internal",
+    srcs = [
+        "internal/low_level_alloc.cc",
+        "internal/malloc_hook.cc",
+        "internal/malloc_hook_mmap_linux.inc",
+    ],
+    hdrs = [
+        "internal/low_level_alloc.h",
+        "internal/malloc_hook.h",
+        "internal/malloc_hook_c.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    textual_hdrs = [
+        "internal/malloc_hook_invoke.h",
+    ],
+    deps = [
+        ":base",
+        ":config",
+        ":core_headers",
+        ":dynamic_annotations",
+    ],
+)
+
+cc_library(
+    name = "base_internal",
+    hdrs = [
+        "internal/identity.h",
+        "internal/invoke.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_library(
+    name = "base",
+    srcs = [
+        "internal/cycleclock.cc",
+        "internal/raw_logging.cc",
+        "internal/spinlock.cc",
+        "internal/sysinfo.cc",
+        "internal/thread_identity.cc",
+        "internal/unscaledcycleclock.cc",
+    ],
+    hdrs = [
+        "call_once.h",
+        "casts.h",
+        "internal/atomic_hook.h",
+        "internal/cycleclock.h",
+        "internal/log_severity.h",
+        "internal/low_level_scheduling.h",
+        "internal/per_thread_tls.h",
+        "internal/raw_logging.h",
+        "internal/spinlock.h",
+        "internal/sysinfo.h",
+        "internal/thread_identity.h",
+        "internal/tsan_mutex_interface.h",
+        "internal/unscaledcycleclock.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        ":dynamic_annotations",
+        ":spinlock_wait",
+    ],
+)
+
+cc_test(
+    name = "bit_cast_test",
+    size = "small",
+    srcs = [
+        "bit_cast_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "throw_delegate",
+    srcs = ["internal/throw_delegate.cc"],
+    hdrs = ["internal/throw_delegate.h"],
+    copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG,
+    features = [
+        "-use_header_modules",  # b/33207452
+    ],
+    deps = [
+        ":base",
+        ":config",
+    ],
+)
+
+cc_test(
+    name = "throw_delegate_test",
+    srcs = ["throw_delegate_test.cc"],
+    copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+    deps = [
+        ":throw_delegate",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "exception_testing",
+    testonly = 1,
+    hdrs = ["internal/exception_testing.h"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":config",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "invoke_test",
+    size = "small",
+    srcs = ["invoke_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base_internal",
+        "//absl/strings",
+        "//absl/memory",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+# Common test library made available for use in non-absl code that overrides
+# AbslInternalSpinLockDelay and AbslInternalSpinLockWake.
+cc_library(
+    name = "spinlock_test_common",
+    testonly = 1,
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+    ],
+    alwayslink = 1,
+)
+
+cc_test(
+    name = "spinlock_test",
+    size = "medium",
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "endian",
+    hdrs = [
+        "internal/endian.h",
+        "internal/unaligned_access.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_test(
+    name = "endian_test",
+    srcs = ["internal/endian_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":config",
+        ":endian",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "config_test",
+    srcs = ["config_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":config",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "call_once_test",
+    srcs = ["call_once_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "raw_logging_test",
+    srcs = ["raw_logging_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "sysinfo_test",
+    size = "small",
+    srcs = ["internal/sysinfo_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "low_level_alloc_test",
+    size = "small",
+    srcs = ["internal/low_level_alloc_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//absl:windows": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    deps = [":malloc_internal"],
+)
+
+cc_test(
+    name = "thread_identity_test",
+    size = "small",
+    srcs = ["internal/thread_identity_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = select({
+        "//absl:windows": [],
+        "//conditions:default": ["-pthread"],
+    }),
+    deps = [
+        ":base",
+        "//absl/synchronization",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "malloc_extension_system_malloc_test",
+    size = "small",
+    srcs = ["internal/malloc_extension_test.cc"],
+    copts = select({
+        "//absl:windows": [
+            "/DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
+        ],
+        "//conditions:default": [
+            "-DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
+        ],
+    }) + ABSL_TEST_COPTS,
+    features = [
+        # This test can't be run under lsan because the test requires system
+        # malloc, and lsan provides a competing malloc implementation.
+        "-leak_sanitize",
+    ],
+    deps = [
+        ":malloc_extension",
+    ] + select(GUNIT_MAIN_NO_LEAK_CHECK_DEPS_SELECTOR),
+)
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
new file mode 100644
index 000000000..5eecdabed
--- /dev/null
+++ b/absl/base/attributes.h
@@ -0,0 +1,469 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Various macros for C++ attributes.
+// This file is used for both C and C++!
+//
+// Most macros here expose GCC or Clang features, and are stubbed out for
+// other compilers.
+// GCC attributes documentation:
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html
+//
+// Most attributes in this file are already supported by GCC 4.7.
+// However, some of them are not supported in older versions of Clang.
+// Thus, we check __has_attribute() first. If the check fails, we check if we
+// are on GCC and assume the attribute exists on GCC (which is verified on GCC
+// 4.7).
+//
+// For sanitizer-related attributes, define the following macros
+// using -D along with the given value for -fsanitize:
+// - ADDRESS_SANITIZER with -fsanitize=address (GCC 4.8+, Clang)
+// - MEMORY_SANITIZER with -fsanitize=memory (Clang)
+// - THREAD_SANITIZER with -fsanitize=thread (GCC 4.8+, Clang)
+// - UNDEFINED_BEHAVIOR_SANITIZER with -fsanitize=undefined (GCC 4.9+, Clang)
+// - CONTROL_FLOW_INTEGRITY with -fsanitize=cfi (Clang)
+// Since these are only supported by GCC and Clang now, we only check for
+// __GNUC__ (GCC or Clang) and the above macros.
+#ifndef ABSL_BASE_ATTRIBUTES_H_
+#define ABSL_BASE_ATTRIBUTES_H_
+
+// ABSL_HAVE_ATTRIBUTE is a function-like feature checking macro.
+// It's a wrapper around __has_attribute, which is defined by GCC 5+ and Clang.
+// It evaluates to a nonzero constant integer if the attribute is supported
+// or 0 if not.
+// It evaluates to zero if __has_attribute is not defined by the compiler.
+// GCC: https://gcc.gnu.org/gcc-5/changes.html
+// Clang: https://clang.llvm.org/docs/LanguageExtensions.html
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+
+// ABSL_HAVE_CPP_ATTRIBUTE is a function-like feature checking macro that
+// accepts C++11 style attributes. It's a wrapper around __has_cpp_attribute,
+// defined by ISO C++ SD-6
+// (http://en.cppreference.com/w/cpp/experimental/feature_test). If we don't
+// find __has_cpp_attribute, it will evaluate to 0.
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+// NOTE: requiring __cplusplus above should not be necessary, but
+// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE, ABSL_SCANF_ATTRIBUTE
+// Tell the compiler to do printf format string checking if the
+// compiler supports it; see the 'format' attribute in <stdio.h>.
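+//
+// For example (an illustrative declaration; the function name is invented):
+//
+//   extern void LogF(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);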
+//
+// N.B.: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE, ABSL_ATTRIBUTE_NOINLINE
+// For functions we want to force inline or not inline.
+// Introduced in gcc 3.1.
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
+#else
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
+#else
+#define ABSL_ATTRIBUTE_NOINLINE
+#endif
+
+// ABSL_ATTRIBUTE_NO_TAIL_CALL
+// Prevent the compiler from optimizing away stack frames for functions which
+// end in a call to another function.
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
+#elif defined(__GNUC__) && !defined(__clang__)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
+  __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
+#endif
+
+// ABSL_ATTRIBUTE_WEAK
+// For weak functions
+#if ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_WEAK
+#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
+#define ABSL_HAVE_ATTRIBUTE_WEAK 1
+#else
+#define ABSL_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_WEAK 0
+#endif
+
+// ABSL_ATTRIBUTE_NONNULL
+// Tell the compiler either that a particular function parameter
+// should be a non-null pointer, or that all pointer arguments should
+// be non-null.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+//
+// Args are indexed starting at 1.
+// For non-static class member functions, the implicit "this" argument
+// is arg 1, and the first explicit argument is arg 2.
+// For static class member functions, there is no implicit "this", and
+// the first explicit argument is arg 1.
+//
+//   /* arg_a cannot be null, but arg_b can */
+//   void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
+//
+//   class C {
+//     /* arg_a cannot be null, but arg_b can */
+//     void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
+//
+//     /* arg_a cannot be null, but arg_b can */
+//     static void StaticMethod(void* arg_a, void* arg_b)
+//         ABSL_ATTRIBUTE_NONNULL(1);
+//   };
+//
+// If no arguments are provided, then all pointer arguments should be non-null.
+//
+//   /* No pointer arguments may be null. */
+//   void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
+//
+// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
+// ABSL_ATTRIBUTE_NONNULL does not.
+#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
+#else
+#define ABSL_ATTRIBUTE_NONNULL(...)
+#endif
+
+// ABSL_ATTRIBUTE_NORETURN
+// Tell the compiler that a given function never returns.
+#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define ABSL_ATTRIBUTE_NORETURN
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+// Tell AddressSanitizer (or other memory testing tools) to ignore a given
+// function. Useful for cases when a function reads random locations on stack,
+// calls _exit from a cloned subprocess, deliberately accesses buffer
+// out of bounds or does other scary things with memory.
+// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if defined(__GNUC__) && defined(ADDRESS_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+// Tell MemorySanitizer to relax the handling of a given function. All "Use of
+// uninitialized value" warnings from such functions will be suppressed, and
+// all values loaded from memory will be considered fully initialized.
+// This is similar to the ADDRESS_SANITIZER attribute above, but deals with
+// initializedness rather than addressability issues.
+// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
+#if defined(__GNUC__) && defined(MEMORY_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+// Tell ThreadSanitizer to not instrument a given function.
+// If you are adding this attribute, please cc dynamic-tools@ on the cl.
+// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if defined(__GNUC__) && defined(THREAD_SANITIZER)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+// Tell UndefinedBehaviorSanitizer to ignore a given function. Useful for cases
+// where certain behavior (e.g. division by zero) is being used intentionally.
+// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
+// https://gcc.gnu.org/gcc-4.9/changes.html
+#if defined(__GNUC__) && \
+    (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER))
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize("undefined")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+// Tell ControlFlowIntegrity sanitizer to not instrument a given function.
+// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
+#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+#endif
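+
+// As an illustrative sketch, the sanitizer macros above annotate a definition
+// like any other function attribute (the function below is invented):
+//
+//   ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+//   int ReadPossiblyUninitialized(const int* p) { return *p; }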
+
+// ABSL_ATTRIBUTE_SECTION
+// Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) ||                \
+       (defined(__GNUC__) && !defined(__clang__))) && \
+    !defined(__APPLE__)
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+//
+// Tell the compiler/linker to put a given function into a section and define
+// "__start_ ## name" and "__stop_ ## name" symbols to bracket the section.
+// This functionality is supported by GNU linker.
+// Any function with ABSL_ATTRIBUTE_SECTION must not be inlined, or it will
+// be placed into whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+  __attribute__((section(#name))) __attribute__((noinline))
+#endif
+
+// Tell the compiler/linker to put a given variable into a section and define
+// "__start_ ## name" and "__stop_ ## name" symbols to bracket the section.
+// This functionality is supported by GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+//
+// Weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION_VARS should be in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// Return void* pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void*>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void*>(__stop_##name))
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
+#endif  // ABSL_ATTRIBUTE_SECTION
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+  __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else  // !__i386__ && !__x86_64
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif  // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro must appear as the very first part of a function
+// declaration or definition:
+//
+//   ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+//
+// This placement has the broadest compatibility with GCC, Clang, and MSVC,
+// with both defs and decls, and with GCC-style attributes, MSVC declspec,
+// C++11 and C++17 attributes.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. For that, warn_unused_result is used only for clang but not for gcc.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+#if ABSL_HAVE_ATTRIBUTE(nodiscard)
+#define ABSL_MUST_USE_RESULT [[nodiscard]]
+#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+// Tell GCC that a function is hot or cold. GCC can use this information to
+// improve static analysis, i.e. a conditional branch to a cold function
+// is likely to be not-taken.
+// This annotation is used for function declarations, e.g.:
+//   int foo() ABSL_ATTRIBUTE_HOT;
+#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
+#else
+#define ABSL_ATTRIBUTE_HOT
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
+#else
+#define ABSL_ATTRIBUTE_COLD
+#endif
+
+// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
+//
+// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
+// macros used as attributes to mark functions that must always or never be
+// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
+//
+// For reference on the LLVM XRay instrumentation, see
+// http://llvm.org/docs/XRay.html.
+//
+// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its
+// declaration will always get the XRay instrumentation sleds. These sleds may
+// introduce some binary size and runtime overhead and must be used sparingly.
+//
+// These attributes only take effect when the following conditions are met:
+//
+//   - The file/target is built in at least C++11 mode, with a Clang compiler
+//     that supports XRay attributes.
+//   - The file/target is built with the -fxray-instrument flag set for the
+//     Clang/LLVM compiler.
+//   - The function is defined in the translation unit (the compiler honors the
+//     attribute in either the definition or the declaration, and must match).
+//
+// There are cases when, even when building with XRay instrumentation, users
+// might want to control specifically which functions are instrumented for a
+// particular build using special-case lists provided to the compiler. These
+// special case lists are provided to Clang via the
+// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The
+// attributes in source take precedence over these special-case lists.
+//
+// To disable the XRay attributes at build-time, users may define
+// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific
+// packages/targets, as this may lead to conflicting definitions of functions
+// at link-time.
+//
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \
+    !defined(ABSL_NO_XRAY_ATTRIBUTES)
+#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]]
+#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
+#define ABSL_XRAY_LOG_ARGS(N) \
+  [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+#else
+#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
+#endif
+#else
+#define ABSL_XRAY_ALWAYS_INSTRUMENT
+#define ABSL_XRAY_NEVER_INSTRUMENT
+#define ABSL_XRAY_LOG_ARGS(N)
+#endif
+
+// -----------------------------------------------------------------------------
+// Variable Attributes
+// -----------------------------------------------------------------------------
+
+// ABSL_ATTRIBUTE_UNUSED
+// Prevent the compiler from complaining about or optimizing away variables
+// that appear unused.
+#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_UNUSED
+#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define ABSL_ATTRIBUTE_UNUSED
+#endif
+
+// ABSL_ATTRIBUTE_INITIAL_EXEC
+// Tell the compiler to use "initial-exec" mode for a thread-local variable.
+// See http://people.redhat.com/drepper/tls.pdf for the gory details.
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec")))
+#else
+#define ABSL_ATTRIBUTE_INITIAL_EXEC
+#endif
+
+// ABSL_ATTRIBUTE_PACKED
+// Prevent the compiler from padding a structure to natural alignment.
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ABSL_ATTRIBUTE_PACKED
+#endif
+
+// ABSL_CONST_INIT
+// A variable declaration annotated with the ABSL_CONST_INIT attribute will
+// not compile (on supported platforms) unless the variable has a constant
+// initializer. This is useful for variables with static and thread storage
+// duration, because it guarantees that they will not suffer from the so-called
+// "static init order fiasco".
+//
+// Sample usage:
+//
+//   ABSL_CONST_INIT static MyType my_var = MakeMyType(...);
+//
+// Note that this attribute is redundant if the variable is declared constexpr.
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+// NOLINTNEXTLINE(whitespace/braces) (b/36288871)
+#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
+#else
+#define ABSL_CONST_INIT
+#endif  // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+
+#endif  // ABSL_BASE_ATTRIBUTES_H_
diff --git a/absl/base/bit_cast_test.cc b/absl/base/bit_cast_test.cc
new file mode 100644
index 000000000..8cd878d75
--- /dev/null
+++ b/absl/base/bit_cast_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Unit test for bit_cast template.
+
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+namespace {
+
+template <int N>
+struct marshall { char buf[N]; };
+
+template <typename T>
+void TestMarshall(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
+    T t1 = absl::bit_cast<T>(m0);
+    marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
+  }
+}
+
+// Convert back and forth to an integral type. The C++ standard does
+// not guarantee this will work, but we test that it works on all the
+// platforms we support.
+//
+// Likewise, we below make assumptions about sizeof(float) and
+// sizeof(double) which the standard does not guarantee, but which hold on the
+// platforms we support.
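+//
+// For instance, the float tests below lean on this size assumption (a hedged
+// illustration, not part of the test proper):
+//
+//   static_assert(sizeof(float) == sizeof(uint32_t),
+//                 "the integral round-trip below assumes 32-bit floats");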
+
+template <typename T, typename I>
+void TestIntegral(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    I i0 = absl::bit_cast<I>(t0);
+    T t1 = absl::bit_cast<T>(i0);
+    I i1 = absl::bit_cast<I>(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(i0, i1);
+  }
+}
+
+TEST(BitCast, Bool) {
+  static const bool bool_list[] = { false, true };
+  TestMarshall(bool_list, ABSL_ARRAYSIZE(bool_list));
+}
+
+TEST(BitCast, Int32) {
+  static const int32_t int_list[] =
+    { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
+  TestMarshall(int_list, ABSL_ARRAYSIZE(int_list));
+}
+
+TEST(BitCast, Int64) {
+  static const int64_t int64_list[] =
+    { 0, 1, 1LL << 40, -1, -(1LL<<40) };
+  TestMarshall(int64_list, ABSL_ARRAYSIZE(int64_list));
+}
+
+TEST(BitCast, Uint64) {
+  static const uint64_t uint64_list[] =
+    { 0, 1, 1LLU << 40, 1LLU << 63 };
+  TestMarshall(uint64_list, ABSL_ARRAYSIZE(uint64_list));
+}
+
+TEST(BitCast, Float) {
+  static const float float_list[] =
+    { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
+      1e10f, 1e20f, 1e-10f, 1e-20f,
+      2.71828f, 3.14159f };
+  TestMarshall(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, int32_t>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, uint32_t>(float_list, ABSL_ARRAYSIZE(float_list));
+}
+
+TEST(BitCast, Double) {
+  static const double double_list[] =
+    { 0.0, 1.0, -1.0, 10.0, -10.0,
+      1e10, 1e100, 1e-10, 1e-100,
+      2.718281828459045,
+      3.141592653589793238462643383279502884197169399375105820974944 };
+  TestMarshall(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
new file mode 100644
index 000000000..478a41c1f
--- /dev/null
+++ b/absl/base/call_once.h
@@ -0,0 +1,210 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <atomic>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock_wait.h"
+
+namespace absl {
+
+class once_flag;
+
+namespace base_internal {
+// Implementation detail.
+std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
+}  // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+// class MyInitClass {
+//  public:
+//   ...
+//   mutable absl::once_flag once_;
+//
+//   MyInitClass* init() const {
+//     absl::call_once(once_, &MyInitClass::Init, this);
+//     return ptr_;
+//   }
+//
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
+class once_flag {
+ public:
+  constexpr once_flag() : control_(0) {}
+  once_flag(const once_flag&) = delete;
+  once_flag& operator=(const once_flag&) = delete;
+
+ private:
+  friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
+  std::atomic<uint32_t> control_;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+    }
+  }
+
+  ~SchedulingHelper() {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+    }
+  }
+
+ private:
+  base_internal::SchedulingMode mode_;
+  bool guard_result_;
+};
+
+// Bit patterns for call_once state machine values. Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging. However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+  kOnceInit = 0,
+  kOnceRunning = 0x65C2937B,
+  kOnceWaiter = 0x05A308D2,
+  kOnceDone = 0x3F2D8AB0,
+};
+
+template <typename Callable, typename... Args>
+void CallOnceImpl(std::atomic<uint32_t>* control,
+                  base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+                  Args&&... args) {
+#ifndef NDEBUG
+  {
+    uint32_t old_control = control->load(std::memory_order_acquire);
+    if (old_control != kOnceInit &&
+        old_control != kOnceRunning &&
+        old_control != kOnceWaiter &&
+        old_control != kOnceDone) {
+      ABSL_RAW_LOG(
+          FATAL,
+          "Unexpected value for control word: %d. Either the control word "
+          "has non-static storage duration (where GoogleOnceDynamic might "
+          "be appropriate), or there's been a memory corruption.",
+          old_control);
+    }
+  }
+#endif  // NDEBUG
+  static const base_internal::SpinLockWaitTransition trans[] = {
+      {kOnceInit, kOnceRunning, true},
+      {kOnceRunning, kOnceWaiter, false},
+      {kOnceDone, kOnceDone, true}};
+
+  // Must do this before potentially modifying control word's state.
+  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+  // Short circuit the simplest case to avoid procedure call overhead.
+  uint32_t old_control = kOnceInit;
+  if (control->compare_exchange_strong(old_control, kOnceRunning,
+                                       std::memory_order_acquire,
+                                       std::memory_order_relaxed) ||
+      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+                                  scheduling_mode) == kOnceInit) {
+    base_internal::Invoke(std::forward<Callable>(fn),
+                          std::forward<Args>(args)...);
+    old_control = control->load(std::memory_order_relaxed);
+    control->store(base_internal::kOnceDone, std::memory_order_release);
+    if (old_control == base_internal::kOnceWaiter) {
+      base_internal::SpinLockWake(control, true);
+    }
+  }  // else *control is already kOnceDone
+}
+
+inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
+  return &flag->control_;
+}
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+                                std::forward<Callable>(fn),
+                                std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(
+        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+        std::forward<Callable>(fn), std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_CALL_ONCE_H_
diff --git a/absl/base/call_once_test.cc b/absl/base/call_once_test.cc
new file mode 100644
index 000000000..de235432b
--- /dev/null
+++ b/absl/base/call_once_test.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/call_once.h"
+
+#include <thread>
+#include <vector>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+namespace {
+
+absl::once_flag once;
+Mutex counters_mu;
+
+int running_thread_count GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count GUARDED_BY(counters_mu) = 0;
+int call_once_return_count GUARDED_BY(counters_mu) = 0;
+bool done_blocking GUARDED_BY(counters_mu) = false;
+
+// Function to be called from absl::call_once. Waits for a notification.
+void WaitAndIncrement() {
+  counters_mu.Lock();
+  ++call_once_invoke_count;
+  counters_mu.Unlock();
+
+  counters_mu.LockWhen(Condition(&done_blocking));
+  ++call_once_finished_count;
+  counters_mu.Unlock();
+}
+
+void ThreadBody() {
+  counters_mu.Lock();
+  ++running_thread_count;
+  counters_mu.Unlock();
+
+  absl::call_once(once, WaitAndIncrement);
+
+  counters_mu.Lock();
+  ++call_once_return_count;
+  counters_mu.Unlock();
+}
+
+// Returns true if all threads are set up for the test.
+bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+  // All ten threads must be running, and WaitAndIncrement should be blocked.
+  return running_thread_count == 10 && call_once_invoke_count == 1;
+}
+
+TEST(CallOnceTest, ExecutionCount) {
+  std::vector<std::thread> threads;
+
+  // Start 10 threads all calling call_once on the same once_flag.
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(ThreadBody);
+  }
+
+  // Wait until all ten threads have started, and WaitAndIncrement has been
+  // invoked.
+  counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
+
+  // WaitAndIncrement should have been invoked by exactly one call_once()
+  // instance. That thread should be blocking on a notification, and all other
+  // call_once instances should be blocking as well.
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 0);
+  EXPECT_EQ(call_once_return_count, 0);
+
+  // Allow WaitAndIncrement to finish executing. Once it does, the other
+  // call_once waiters will be unblocked.
+  done_blocking = true;
+  counters_mu.Unlock();
+
+  for (std::thread& thread : threads) {
+    thread.join();
+  }
+
+  counters_mu.Lock();
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 1);
+  EXPECT_EQ(call_once_return_count, 10);
+  counters_mu.Unlock();
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/casts.h b/absl/base/casts.h
new file mode 100644
index 000000000..2a0adc295
--- /dev/null
+++ b/absl/base/casts.h
@@ -0,0 +1,141 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+//
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+namespace absl {
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+inline To implicit_cast(typename absl::internal::identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Performs a bitwise cast on a type without changing the underlying bit
+// representation of that type's value. The two types must be of the same size
+// and both types must be trivially copyable. As with most casts, use with
+// caution. A `bit_cast()` might be needed when you need to temporarily treat a
+// type as some other type, such as in the following cases:
+//
+//   * Serialization (casting temporarily to `char *` for those purposes is
+//     always allowed by the C++ standard)
+//   * Managing the individual bits of a type within mathematical operations
+//     that are not normally accessible through that type
+//   * Casting non-pointer types to pointer types (casting the other way is
+//     allowed by `reinterpret_cast()` but round-trips cannot occur the other
+//     way).
+//
+// Example:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32_t>(f);
+//   // i = 0x40490fdb
+//
+// Casting non-pointer types to pointer types and then dereferencing them
+// traditionally produces undefined behavior.
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method produces undefined behavior according to the ISO
+// C++ specification section [basic.lval]. Roughly, this section says: if an
+// object in memory has one type, and a program accesses it with a different
+// type, the result is undefined behavior for most values of "different type".
+//
+// Such casting results in type punning: holding an object in memory of one
+// type and reading its bits back using a different type. A `bit_cast()` avoids
+// this issue by implementing its casts using `memcpy()`, which avoids
+// introducing this undefined behavior.
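+//
+// A hedged sketch of the serialization direction (the byte source is
+// illustrative; only the equal-size requirement matters):
+//
+//   char buf[sizeof(uint32_t)];
+//   FillFromWire(buf);                             // hypothetical reader
+//   uint32_t value = bit_cast<uint32_t>(buf);      // alias-safe read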
+template <typename Dest, typename Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "Source and destination types should have equal sizes.");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_CASTS_H_
diff --git a/absl/base/config.h b/absl/base/config.h
new file mode 100644
index 000000000..cf47f84c3
--- /dev/null
+++ b/absl/base/config.h
@@ -0,0 +1,367 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+//   #include "absl/base/config.h"
+//
+//   #ifdef ABSL_HAVE_MMAP
+//   #include "sys/mman.h"
+//   #endif  // ABSL_HAVE_MMAP
+//
+//   ...
+//   #ifdef ABSL_HAVE_MMAP
+//   void *ptr = mmap(...);
+//   ...
+//   #endif  // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif  // __cplusplus
+
+#include "absl/base/policy_checks.h"
+
+// -----------------------------------------------------------------------------
+// Compiler Feature Checks
+// -----------------------------------------------------------------------------
+
+// ABSL_HAVE_BUILTIN()
+//
+// Checks whether the compiler supports a Clang Feature Checking Macro, and if
+// so, checks whether it supports the provided builtin function "x" where x
+// is one of the functions noted in
+// https://clang.llvm.org/docs/LanguageExtensions.html
+//
+// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
+#ifdef __has_builtin
+#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
+#else
+#define ABSL_HAVE_BUILTIN(x) 0
+#endif
+
+// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
+// We assume __thread is supported on Linux when compiled with Clang or
+// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
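+//
+// A hedged usage sketch (the variable is illustrative):
+//
+//   #ifdef ABSL_HAVE_TLS
+//   static __thread int per_thread_counter;  // fast compiler-supported TLS
+//   #else
+//   // fall back to pthread_getspecific()/pthread_setspecific()
+//   #endif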
+#ifdef ABSL_HAVE_TLS
+#error ABSL_HAVE_TLS cannot be directly set
+#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#define ABSL_HAVE_TLS 1
+#endif
+
+// There are platforms for which TLS should not be used even though the
+// compiler makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \
+    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#endif
+#endif  // defined(__ANDROID__) && defined(__clang__)
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+//
+// Checks whether `std::is_trivially_destructible` is supported.
+//
+// Notes: All supported compilers using libc++ support this feature, as do
+// gcc >= 4.8.1 using libstdc++ and Visual Studio.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#elif defined(_LIBCPP_VERSION) || \
+    (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \
+     (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \
+    defined(_MSC_VER)
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible` and
+// `std::is_trivially_copy_constructible` are supported.
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable` is supported.
+
+// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with
+// either libc++ or libstdc++, and Visual Studio.
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
+    (!defined(__clang__) && defined(__GNUC__) && \
+     (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)) && \
+     (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \
+    defined(_MSC_VER)
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+//
+// Notes: Clang implements the `thread_local` keyword but Xcode did not support
+// the implementation until Xcode 8.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif !defined(__apple_build_version__) || __apple_build_version__ >= 8000042
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type
+// is supported.
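+//
+// A hedged sketch of guarded use (`a` and `b` stand in for 64-bit operands):
+//
+//   #ifdef ABSL_HAVE_INTRINSIC_INT128
+//   __int128 wide_product = static_cast<__int128>(a) * b;
+//   #endif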
+//
+// Notes: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
+// supported, except on ppc64 and aarch64 where __int128 exists but exhibits a
+// sporadic compiler-crashing bug. Nvidia's nvcc also defines __GNUC__ and
+// __SIZEOF_INT128__ but not all versions actually support __int128.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
+#elif (defined(__clang__) && defined(__SIZEOF_INT128__) && \
+       !defined(__ppc64__) && !defined(__aarch64__)) || \
+    (defined(__CUDACC__) && defined(__SIZEOF_INT128__) && \
+     __CUDACC_VER__ >= 70000) || \
+    (!defined(__clang__) && !defined(__CUDACC__) && defined(__GNUC__) && \
+     defined(__SIZEOF_INT128__))
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#endif
+
+// ABSL_HAVE_EXCEPTIONS
+//
+// Checks whether the compiler both supports and enables exceptions. Many
+// compilers support a "no exceptions" mode that disables exceptions.
+//
+// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
+//
+//   * Code using `throw` and `try` may not compile.
+//   * The `noexcept` specifier will still compile and behave as normal.
+//   * The `noexcept` operator may still return `false`.
+//
+// For further details, consult the compiler's documentation.
+#ifdef ABSL_HAVE_EXCEPTIONS
+#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
+
+#elif defined(__clang__)
+// TODO(calabrese)
+// Switch to using __cpp_exceptions when we no longer support versions < 3.6.
+// For details on this check, see:
+// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
+#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif  // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
+
+// Handle remaining special cases and default to exceptions being supported.
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
+    !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \
+    !(defined(_MSC_VER) && !defined(_CPPUNWIND))
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif
+
+// -----------------------------------------------------------------------------
+// Platform Feature Checks
+// -----------------------------------------------------------------------------
+
+// Currently supported operating systems and associated preprocessor
+// symbols:
+//
+//   Linux and Linux-derived           __linux__
+//   Android                           __ANDROID__ (implies __linux__)
+//   Linux (non-Android)               __linux__ && !__ANDROID__
+//   Darwin (Mac OS X and iOS)         __APPLE__
+//   Akaros (http://akaros.org)        __ros__
+//   Windows                           _WIN32
+//   NaCl                              __native_client__
+//   AsmJS                             __asmjs__
+//   Fuchsia                           __Fuchsia__
+//
+// Note that since Android defines both __ANDROID__ and __linux__, one
+// may probe for either Linux or Android by simply testing for __linux__.
+
+// ABSL_HAVE_MMAP
+//
+// Checks whether the platform has an mmap(2) implementation as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_MMAP
+#error ABSL_HAVE_MMAP cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__) || \
+    defined(__native_client__) || defined(__asmjs__) || defined(__Fuchsia__)
+#define ABSL_HAVE_MMAP 1
+#endif
+
+// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//
+// Checks whether the platform implements the pthread_(get|set)schedparam(3)
+// functions as defined in POSIX.1-2001.
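+//
+// A hedged sketch of guarded use:
+//
+//   #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//   int policy;
+//   struct sched_param param;
+//   pthread_getschedparam(pthread_self(), &policy, &param);
+//   #endif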
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__)
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
+#endif
+
+// ABSL_HAVE_SCHED_YIELD
+//
+// Checks whether the platform implements sched_yield(2) as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_SCHED_YIELD
+#error ABSL_HAVE_SCHED_YIELD cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#define ABSL_HAVE_SCHED_YIELD 1
+#endif
+
+// ABSL_HAVE_SEMAPHORE_H
+//
+// Checks whether the platform supports the <semaphore.h> header and
+// sem_open(3) family of functions as standardized in POSIX.1-2001.
+//
+// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
+// explicitly deprecated and will cause build failures if enabled for those
+// platforms. We side-step the issue by not defining it here for Apple
+// platforms.
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
+#elif defined(__linux__) || defined(__ros__)
+#define ABSL_HAVE_SEMAPHORE_H 1
+#endif
+
+// ABSL_HAVE_ALARM
+//
+// Checks whether the platform supports the <signal.h> header and alarm(2)
+// function as standardized in POSIX.1-2001.
+#ifdef ABSL_HAVE_ALARM
+#error ABSL_HAVE_ALARM cannot be directly set
+#elif defined(__GOOGLE_GRTE_VERSION__)
+// feature tests for Google's GRTE
+#define ABSL_HAVE_ALARM 1
+#elif defined(__GLIBC__)
+// feature test for glibc
+#define ABSL_HAVE_ALARM 1
+#elif defined(_MSC_VER)
+// feature tests for Microsoft's library
+#elif defined(__native_client__)
+#else
+// other standard libraries
+#define ABSL_HAVE_ALARM 1
+#endif
+
+// ABSL_IS_LITTLE_ENDIAN
+// ABSL_IS_BIG_ENDIAN
+//
+// Checks the endianness of the platform.
+//
+// Notes: uses the built-in endian macros provided by GCC (since 4.6) and
+// Clang (since 3.2); see
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
+// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
+#if defined(ABSL_IS_BIG_ENDIAN)
+#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
+#endif
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "absl endian detection needs to be set up for your compiler"
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available by checking whether <any> exists.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<any>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_ANY 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<string_view>) && __cplusplus >= 201703L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+#endif
+
+#endif  // ABSL_BASE_CONFIG_H_
diff --git a/absl/base/config_test.cc b/absl/base/config_test.cc
new file mode 100644
index 000000000..578bb810b
--- /dev/null
+++ b/absl/base/config_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+
+#include <cstdint>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(ConfigTest, Endianness) {
+  union {
+    uint32_t value;
+    uint8_t data[sizeof(uint32_t)];
+  } number;
+  number.data[0] = 0x00;
+  number.data[1] = 0x01;
+  number.data[2] = 0x02;
+  number.data[3] = 0x03;
+#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN)
+#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x03020100), number.value);
+#elif defined(ABSL_IS_BIG_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x00010203), number.value);
+#else
+#error Unknown endianness
+#endif
+}
+
+}  // namespace
diff --git a/absl/base/dynamic_annotations.cc b/absl/base/dynamic_annotations.cc
new file mode 100644
index 000000000..08c27e511
--- /dev/null
+++ b/absl/base/dynamic_annotations.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "absl/base/dynamic_annotations.h"
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+/* Compiler-based ThreadSanitizer defines
+   DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+   and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
+/* Each function is empty and called (via a macro) only in debug mode.
+   The arguments are captured by dynamic tools at runtime. */
+
+#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void AnnotateRWLockCreate(const char *, int,
+                          const volatile void *){}
+void AnnotateRWLockDestroy(const char *, int,
+                           const volatile void *){}
+void AnnotateRWLockAcquired(const char *, int,
+                            const volatile void *, long){}
+void AnnotateRWLockReleased(const char *, int,
+                            const volatile void *, long){}
+void AnnotateBenignRace(const char *, int,
+                        const volatile void *,
+                        const char *){}
+void AnnotateBenignRaceSized(const char *, int,
+                             const volatile void *,
+                             size_t,
+                             const char *) {}
+void AnnotateThreadName(const char *, int,
+                        const char *){}
+void AnnotateIgnoreReadsBegin(const char *, int){}
+void AnnotateIgnoreReadsEnd(const char *, int){}
+void AnnotateIgnoreWritesBegin(const char *, int){}
+void AnnotateIgnoreWritesEnd(const char *, int){}
+void AnnotateEnableRaceDetection(const char *, int, int){}
+void AnnotateMemoryIsInitialized(const char *, int,
+                                 const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_unpoison(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+void AnnotateMemoryIsUninitialized(const char *, int,
+                                   const volatile void *mem, size_t size) {
+#if __has_feature(memory_sanitizer)
+  __msan_allocated_memory(mem, size);
+#else
+  (void)mem;
+  (void)size;
+#endif
+}
+
+static int GetRunningOnValgrind(void) {
+#ifdef RUNNING_ON_VALGRIND
+  if (RUNNING_ON_VALGRIND) return 1;
+#endif
+  char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
+  if (running_on_valgrind_str) {
+    return strcmp(running_on_valgrind_str, "0") != 0;
+  }
+  return 0;
+}
+
+/* See the comments in dynamic_annotations.h */
+int RunningOnValgrind(void) {
+  static volatile int running_on_valgrind = -1;
+  int local_running_on_valgrind = running_on_valgrind;
+  /* C doesn't have thread-safe initialization of statics, and we
+     don't want to depend on pthread_once here, so hack it. */
+  ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
+  if (local_running_on_valgrind == -1)
+    running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
+  return local_running_on_valgrind;
+}
+
+/* See the comments in dynamic_annotations.h */
+double ValgrindSlowdown(void) {
+  /* Same initialization hack as in RunningOnValgrind(). */
+  static volatile double slowdown = 0.0;
+  double local_slowdown = slowdown;
+  ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
+  if (RunningOnValgrind() == 0) {
+    return 1.0;
+  }
+  if (local_slowdown == 0.0) {
+    char *env = getenv("VALGRIND_SLOWDOWN");
+    slowdown = local_slowdown = env ? atof(env) : 50.0;
+  }
+  return local_slowdown;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+#endif  /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
diff --git a/absl/base/dynamic_annotations.h b/absl/base/dynamic_annotations.h
new file mode 100644
index 000000000..b9c015ba7
--- /dev/null
+++ b/absl/base/dynamic_annotations.h
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* This file defines dynamic annotations for use with dynamic analysis
+   tools such as valgrind, PIN, etc.
+
+   Dynamic annotation is a source code annotation that affects
+   the generated code (that is, the annotation is not a comment).
+   Each such annotation is attached to a particular
+   instruction and/or to a particular object (address) in the program.
+
+   The annotations that should be used by users are macros in all upper-case
+   (e.g., ANNOTATE_THREAD_NAME).
+
+   Actual implementation of these macros may differ depending on the
+   dynamic analysis tool being used.
+
+   This file supports the following configurations:
+   - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+     In this case, macros expand to functions implemented by Thread Sanitizer,
+     when building with TSan. When not provided an external implementation,
+     dynamic_annotations.cc provides no-op implementations.
+
+   - Static Clang thread-safety warnings enabled.
+     When building with a Clang compiler that supports thread-safety warnings,
+     a subset of annotations can be statically-checked at compile-time. We
+     expand these macros to static-inline functions that can be analyzed for
+     thread-safety, but afterwards elided when building the final binary.
+
+   - All annotations are disabled.
+     If neither Dynamic Annotations nor Clang thread-safety warnings are
+     enabled, then all annotation-macros expand to empty. */
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__native_client__)
+  #include "nacl/dynamic_annotations.h"
+
+  // Stub out the macros missing from the NaCl version.
+  #ifndef ANNOTATE_CONTIGUOUS_CONTAINER
+  #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+  #endif
+  #ifndef ANNOTATE_RWLOCK_CREATE_STATIC
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock)
+  #endif
+  #ifndef ADDRESS_SANITIZER_REDZONE
+  #define ADDRESS_SANITIZER_REDZONE(name)
+  #endif
+  #ifndef ANNOTATE_MEMORY_IS_UNINITIALIZED
+  #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)
+  #endif
+
+#else  /* !__native_client__ */
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+  /* -------------------------------------------------------------
+     Annotations that suppress errors. It is usually better to express the
+     program's synchronization using the other annotations, but these can
+     be used when all else fails. */
+
+  /* Report that we may have a benign race at "pointer", with size
+     "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
+     point where "pointer" has been allocated, preferably close to the point
+     where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
+  #define ANNOTATE_BENIGN_RACE(pointer, description) \
+    AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
+                            sizeof(*(pointer)), description)
+
+  /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+     the memory range [address, address+size). */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+    AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
+
+  /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+     This annotation could be useful if you want to skip expensive race
+     analysis during some period of program execution, e.g. during
+     initialization. */
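+
+  /* For example (InitSharedStateOnce is a hypothetical initializer):
+
+       ANNOTATE_ENABLE_RACE_DETECTION(0);
+       InitSharedStateOnce();
+       ANNOTATE_ENABLE_RACE_DETECTION(1);
+  */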
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+    AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
+
+  /* -------------------------------------------------------------
+     Annotations useful for debugging. */
+
+  /* Report the current thread name to a race detector. */
+  #define ANNOTATE_THREAD_NAME(name) \
+    AnnotateThreadName(__FILE__, __LINE__, name)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing locks. They are not
+     normally needed by modules that merely use locks.
+     The "lock" argument is a pointer to the lock object. */
+
+  /* Report that a lock has been created at address "lock". */
+  #define ANNOTATE_RWLOCK_CREATE(lock) \
+    AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+  /* Report that a linker initialized lock has been created at address "lock".
+   */
+#ifdef THREAD_SANITIZER
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+    AnnotateRWLockCreateStatic(__FILE__, __LINE__, lock)
+#else
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+  /* Report that the lock at address "lock" is about to be destroyed. */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) \
+    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" has been acquired.
+     is_w=1 for writer lock, is_w=0 for reader lock. */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+  /* Report that the lock at address "lock" is about to be released. */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_RWLOCK_CREATE(lock)                            /* empty */
+  #define ANNOTATE_RWLOCK_CREATE_STATIC(lock)                     /* empty */
+  #define ANNOTATE_RWLOCK_DESTROY(lock)                           /* empty */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                    /* empty */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                    /* empty */
+  #define ANNOTATE_BENIGN_RACE(address, description)              /* empty */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description)  /* empty */
+  #define ANNOTATE_THREAD_NAME(name)                              /* empty */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable)                  /* empty */
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* These annotations are also made available to LLVM's Memory Sanitizer */
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER)
+  #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+    AnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size)
+
+  #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+    AnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size)
+#else
+  #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size)    /* empty */
+  #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)  /* empty */
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */
+
+/* TODO(delesley) -- Replace __CLANG_SUPPORT_DYN_ANNOTATION__ with the
+   appropriate feature ID. */
+#if defined(__clang__) && (!defined(SWIG)) \
+    && defined(__CLANG_SUPPORT_DYN_ANNOTATION__)
+
+  #if DYNAMIC_ANNOTATIONS_ENABLED == 0
+  #define ANNOTALYSIS_ENABLED
+  #endif
+
+  /* When running in opt-mode, GCC will issue a warning if these attributes
+     are compiled. Only include them when compiling using Clang. */
+  #define ATTRIBUTE_IGNORE_READS_BEGIN \
+      __attribute((exclusive_lock_function("*")))
+  #define ATTRIBUTE_IGNORE_READS_END \
+      __attribute((unlock_function("*")))
+#else
+  #define ATTRIBUTE_IGNORE_READS_BEGIN  /* empty */
+  #define ATTRIBUTE_IGNORE_READS_END    /* empty */
+#endif  /* defined(__clang__) && ... */
+
+#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ANNOTALYSIS_ENABLED)
+  #define ANNOTATIONS_ENABLED
+#endif
+
+#if (DYNAMIC_ANNOTATIONS_ENABLED != 0)
+
+  /* Request the analysis tool to ignore all reads in the current thread
+     until ANNOTATE_IGNORE_READS_END is called.
+     Useful to ignore intentional racey reads, while still checking
+     other reads and all writes.
+     See also ANNOTATE_UNPROTECTED_READ. */
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+
+  /* Stop ignoring reads. */
+  #define ANNOTATE_IGNORE_READS_END() \
+    AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+  /* Stop ignoring writes. */
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+
+/* Clang provides limited support for static thread-safety analysis
+   through a feature called Annotalysis. We configure macro-definitions
+   according to whether Annotalysis support is available. */
+#elif defined(ANNOTALYSIS_ENABLED)
+
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    StaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_READS_END() \
+    StaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    StaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    StaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+
+#else
+  #define ANNOTATE_IGNORE_READS_BEGIN()   /* empty */
+  #define ANNOTATE_IGNORE_READS_END()     /* empty */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN()  /* empty */
+  #define ANNOTATE_IGNORE_WRITES_END()    /* empty */
+#endif
+
+/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+   primitive annotations defined above. */
+#if defined(ANNOTATIONS_ENABLED)
+
+  /* Start ignoring all memory accesses (both reads and writes). */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+    do {                                           \
+      ANNOTATE_IGNORE_READS_BEGIN();               \
+      ANNOTATE_IGNORE_WRITES_BEGIN();              \
+    } while (0)
+
+  /* Stop ignoring both reads and writes. */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+    do {                                         \
+      ANNOTATE_IGNORE_WRITES_END();              \
+      ANNOTATE_IGNORE_READS_END();               \
+    } while (0)
+
+#else
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()  /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END()    /* empty */
+#endif
+
+/* Use the macros above rather than using these functions directly. */
+#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateRWLockCreate(const char *file, int line,
+                          const volatile void *lock);
+void AnnotateRWLockCreateStatic(const char *file, int line,
+                                const volatile void *lock);
+void AnnotateRWLockDestroy(const char *file, int line,
+                           const volatile void *lock);
+void AnnotateRWLockAcquired(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateRWLockReleased(const char *file, int line,
+                            const volatile void *lock, long is_w);  /* NOLINT */
+void AnnotateBenignRace(const char *file, int line,
+                        const volatile void *address,
+                        const char *description);
+void AnnotateBenignRaceSized(const char *file, int line,
+                             const volatile void *address,
+                             size_t size,
+                             const char *description);
+void AnnotateThreadName(const char *file, int line,
+                        const char *name);
+void AnnotateEnableRaceDetection(const char *file, int line, int enable);
+void AnnotateMemoryIsInitialized(const char *file, int line,
+                                 const volatile void *mem, size_t size);
+void AnnotateMemoryIsUninitialized(const char *file, int line,
+                                   const volatile void *mem, size_t size);
+
+/* Annotations expand to these functions, when Dynamic Annotations are enabled.
+   These functions are either implemented as no-op calls, if no Sanitizer is
+   attached, or provided with externally-linked implementations by a library
+   like ThreadSanitizer. */
+void AnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN;
+void AnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END;
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+
+#if defined(ANNOTALYSIS_ENABLED)
+/* When Annotalysis is enabled without Dynamic Annotations, the use of
+   static-inline functions allows the annotations to be read at compile-time,
+   while still letting the compiler elide the functions from the final build.
+
+   TODO(delesley) -- The exclusive lock here ignores writes as well, but
+   allows IGNORE_READS_AND_WRITES to work properly. */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-function"
+static inline void StaticAnnotateIgnoreReadsBegin(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreReadsEnd(const char *file, int line)
+    ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesBegin(
+    const char *file, int line) { (void)file; (void)line; }
+static inline void StaticAnnotateIgnoreWritesEnd(
+    const char *file, int line) { (void)file; (void)line; }
+#pragma GCC diagnostic pop
+#endif
+
+/* Return non-zero value if running under valgrind.
+
+   If "valgrind.h" is included into dynamic_annotations.cc,
+   the regular valgrind mechanism will be used.
+   See http://valgrind.org/docs/manual/manual-core-adv.html about
+   RUNNING_ON_VALGRIND and other valgrind "client requests".
+   The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+   If for some reason you can't use "valgrind.h" or want to fake valgrind,
+   there are two ways to make this function return non-zero:
+     - Use environment variable: export RUNNING_ON_VALGRIND=1
+     - Make your tool intercept the function RunningOnValgrind() and
+       change its return value. */
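+
+/* For example, from a shell (a hedged illustration; "my_test" stands in for
+   any binary linked against this code):
+
+     $ RUNNING_ON_VALGRIND=1 ./my_test
+
+   makes RunningOnValgrind() return non-zero without Valgrind attached. */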
+int RunningOnValgrind(void);
+
+/* ValgrindSlowdown returns:
+     * 1.0, if (RunningOnValgrind() == 0)
+     * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
+     * atof(getenv("VALGRIND_SLOWDOWN")) otherwise
+   This function can be used to scale timeout values:
+   EXAMPLE:
+     for (;;) {
+       DoExpensiveBackgroundTask();
+       SleepForSeconds(5 * ValgrindSlowdown());
+     }
+ */
+double ValgrindSlowdown(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+   Instead of doing
+     ANNOTATE_IGNORE_READS_BEGIN();
+     ... = x;
+     ANNOTATE_IGNORE_READS_END();
+   one can use
+     ... = ANNOTATE_UNPROTECTED_READ(x); */
+#if defined(__cplusplus) && defined(ANNOTATIONS_ENABLED)
+template <typename T>
+inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {  /* NOLINT */
+  ANNOTATE_IGNORE_READS_BEGIN();
+  T res = x;
+  ANNOTATE_IGNORE_READS_END();
+  return res;
+}
+#else
+  #define ANNOTATE_UNPROTECTED_READ(x) (x)
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+  /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
+    namespace {                                                       \
+    class static_var ## _annotator {                                  \
+     public:                                                          \
+      static_var ## _annotator() {                                    \
+        ANNOTATE_BENIGN_RACE_SIZED(&static_var,                       \
+                                   sizeof(static_var),                \
+                                   # static_var ": " description);    \
+      }                                                               \
+    };                                                                \
+    static static_var ## _annotator the ## static_var ## _annotator;  \
+    }  // namespace
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)  /* empty */
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#ifdef ADDRESS_SANITIZER
+/* Describe the current state of a contiguous container such as e.g.
+ * std::vector or std::string. For more details see
+ * sanitizer/common_interface_defs.h, which is provided by the compiler. */
+#include <sanitizer/common_interface_defs.h>
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) \
+  struct { char x[8] __attribute__ ((aligned (8))); } name
+#else
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name)
+#endif  // ADDRESS_SANITIZER
+
+/* Undefine the macros intended for use only within this file. */
+#undef ANNOTALYSIS_ENABLED
+#undef ANNOTATIONS_ENABLED
+#undef ATTRIBUTE_IGNORE_READS_BEGIN
+#undef ATTRIBUTE_IGNORE_READS_END
+
+#endif  /* !__native_client__ */
+
+#endif  /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */
diff --git a/absl/base/internal/atomic_hook.h b/absl/base/internal/atomic_hook.h
new file mode 100644
index 000000000..8eee367e4
--- /dev/null
+++ b/absl/base/internal/atomic_hook.h
@@ -0,0 +1,122 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <utility>
+
+namespace absl {
+namespace base_internal {
+
+// In current versions of MSVC (as of July 2017), a std::atomic<T> where T is a
+// pointer to function cannot be constant-initialized with an address constant
+// expression. That is, the following code does not compile:
+//   void NoOp() {}
+//   constexpr std::atomic<void(*)()> ptr(NoOp);
+//
+// This is the only compiler we support that seems to have this issue. We
+// conditionalize on MSVC here to use a fallback implementation. But we
+// should revisit this occasionally. If MSVC fixes this compiler bug, we
+// can then change this to be conditionalized on the value of _MSC_FULL_VER
+// instead.
+#ifdef _MSC_FULL_VER
+#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 0
+#else
+#define ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION 1
+#endif
+
+template <typename T>
+class AtomicHook;
+
+// AtomicHook is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks. It is a callable object that
+// dispatches to the registered hook, or performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  constexpr AtomicHook() : hook_(DummyFunction) {}
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once. Multiple calls are legal only if the
+  // same function pointer is provided for each call. The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
+  void Store(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = DummyFunction;
+    hook_.compare_exchange_strong(expected, fn, std::memory_order_acq_rel,
+                                  std::memory_order_acquire);
+    // If the compare and exchange failed, make sure that's because hook_ was
+    // already set to `fn` by an earlier call. Any other state reflects an API
+    // violation (calling Store() multiple times with different values).
+    //
+    // Avoid ABSL_RAW_CHECK, since raw logging depends on AtomicHook.
+    assert(expected == DummyFunction || expected == fn);
+  }
+
+  // Invokes the registered callback. If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    FnPtr hook = hook_.load(std::memory_order_acquire);
+    if (ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION || hook) {
+      return hook(std::forward<CallArgs>(args)...);
+    } else {
+      return ReturnType();
+    }
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer. Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = hook_.load(std::memory_order_acquire);
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+#if ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+  static ReturnType DummyFunction(Args...) {
+
+ private:
+#if ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+#else
+  static constexpr FnPtr DummyFunction = nullptr;
+#endif
+
+  std::atomic<FnPtr> hook_;
+};
+
+#undef ABSL_HAVE_FUNCTION_ADDRESS_CONSTANT_EXPRESSION
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc
new file mode 100644
index 000000000..a742df01f
--- /dev/null
+++ b/absl/base/internal/cycleclock.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace {
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+static constexpr int32_t kShift = 1;
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency.  There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+static constexpr int32_t kShift = 0;
+#endif
+#else
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+static constexpr int32_t kShift = 2;
+#endif
+
+static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+
+}  // namespace
+
+int64_t CycleClock::Now() {
+  return base_internal::UnscaledCycleClock::Now() >> kShift;
+}
+
+double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h
new file mode 100644
index 000000000..60e971583
--- /dev/null
+++ b/absl/base/internal/cycleclock.h
@@ -0,0 +1,77 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such.  That is, `CycleClock` cycles
+// are not necessarily "CPU cycles", and code should not rely on that behavior,
+// even if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.  If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <cstdint>
+
+namespace absl {
+namespace base_internal {
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second.
+  // Note that this value may not necessarily match the core CPU clock
+  // frequency.
+  static double Frequency();
+
+ private:
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+};
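+
+// A minimal usage sketch (illustrative only; Busywork() stands in for the
+// code being timed):
+//
+//   const int64_t start = CycleClock::Now();
+//   Busywork();
+//   const double seconds =
+//       (CycleClock::Now() - start) / CycleClock::Frequency();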
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
new file mode 100644
index 000000000..602129eed
--- /dev/null
+++ b/absl/base/internal/endian.h
@@ -0,0 +1,267 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+// Use compiler byte-swapping intrinsics if they are available.  32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#elif defined(__APPLE__)
+inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
+inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
+inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    register uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & 0xFF) << 24) | ((host_int & 0xFF00) << 8) |
+          ((host_int & 0xFF0000) >> 8) | ((host_int & 0xFF000000) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return uint16_t{((host_int & 0xFF) << 8) | ((host_int & 0xFF00) >> 8)};
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h.  We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
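+//
+// For example, on a little-endian host:
+//
+//   uint32_t host = 0x01234567;
+//   uint32_t net = absl::ghtonl(host);   // bytes swapped: 0x67452301
+//   assert(absl::gntohl(net) == host);   // round-trips back to host order
+//
+// On a big-endian host, both calls are the identity function.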
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);".  This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+    "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
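+//
+// For example, reading a 32-bit little-endian length prefix from a raw,
+// possibly misaligned byte buffer (`wire` is illustrative):
+//
+//   char wire[8] = {0x04, 0x00, 0x00, 0x00};           // 4, little-endian
+//   uint32_t len = absl::little_endian::Load32(wire);  // == 4 on any host
+//   absl::little_endian::Store32(wire, len + 1);       // writes 5 back out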
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
diff --git a/absl/base/internal/endian_test.cc b/absl/base/internal/endian_test.cc
new file mode 100644
index 000000000..6812214ef
--- /dev/null
+++ b/absl/base/internal/endian_test.cc
@@ -0,0 +1,281 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+
+namespace absl {
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const uint8_t k8Value{0x01};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#ifdef ABSL_IS_BIG_ENDIAN
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+const uint8_t k8ValueLE{k8Value};
+const uint64_t k64IValueLE{0xefcdab89674523a1};
+const uint32_t k32IValueLE{0x67452391};
+const uint16_t k16IValueLE{0x85ff};
+const uint8_t k8IValueLE{0xff};
+const uint64_t kDoubleValueLE{0x6e861bf0f9210940};
+const uint32_t kFloatValueLE{0xd00f4940};
+const uint8_t kBoolValueLE{0x1};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+const uint8_t k8ValueBE{k8Value};
+const uint64_t k64IValueBE{0xa123456789abcdef};
+const uint32_t k32IValueBE{0x91234567};
+const uint16_t k16IValueBE{0xff85};
+const uint8_t k8IValueBE{0xff};
+const uint64_t kDoubleValueBE{0x400921f9f01b866e};
+const uint32_t kFloatValueBE{0x40490fd0};
+const uint8_t kBoolValueBE{0x1};
+#elif defined ABSL_IS_LITTLE_ENDIAN
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+template <typename T>
+std::vector<T> GenerateAllValuesForType() {
+  std::vector<T> result;
+  T next = std::numeric_limits<T>::min();
+  while (true) {
+    result.push_back(next);
+    if (next == std::numeric_limits<T>::max()) {
+      return result;
+    }
+    ++next;
+  }
+}
+
+template <typename T>
+std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+  std::vector<T> result;
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < numValuesToTest; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template <typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
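+  // The verification strategy: copy the value's bytes into two char buffers,
+  // run the candidate byte_swapper over one and ManualByteSwap() over the
+  // other, then require the two byte patterns to match exactly.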
+  for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" << UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that htonll and ntohll are each others' inverse functions on a
+  // somewhat assorted batch of numbers.  37 is chosen to not be anything
+  // particularly nice in base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian Load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian Load and store functions.
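+  // The aligned u16Buf/u32Buf/u64Buf stores exercise Store/Load on naturally
+  // aligned addresses; the stores at buffer + 1 further down deliberately use
+  // a misaligned pointer, since these helpers are documented to be alignment
+  // safe.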
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/absl/base/internal/exception_testing.h b/absl/base/internal/exception_testing.h
new file mode 100644
index 000000000..99a107348
--- /dev/null
+++ b/absl/base/internal/exception_testing.h
@@ -0,0 +1,24 @@
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#else
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, text)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/absl/base/internal/identity.h b/absl/base/internal/identity.h
new file mode 100644
index 000000000..a6734b4d3
--- /dev/null
+++ b/absl/base/internal/identity.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+namespace absl {
+namespace internal {
+
+template <typename T>
+struct identity {
+  typedef T type;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
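+
+// identity_t puts T in a non-deduced context.  In the sketch below (Fill is
+// illustrative, not part of this header), T is deduced from the first
+// argument only, and the second argument merely converts to T:
+//
+//   template <typename T>
+//   void Fill(std::vector<T>& v, identity_t<T> value);
+//
+//   std::vector<double> v;
+//   Fill(v, 1);  // OK: T = double from v; 1 converts to 1.0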
+
+}  // namespace internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h
new file mode 100644
index 000000000..8c3f4f606
--- /dev/null
+++ b/absl/base/internal/invoke.h
@@ -0,0 +1,188 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::Invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// isn't an error.
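+//
+// For example (Point is illustrative):
+//
+//   struct Point {
+//     int GetX() const { return x; }
+//     int x;
+//   };
+//
+//   Point p = {7};
+//   base_internal::Invoke(&Point::GetX, p);             // clause 1: (p.*f)()
+//   base_internal::Invoke(&Point::x, &p);               // clause 4: (*t1).*f
+//   base_internal::Invoke([](int v) { return v; }, 7);  // clause 5: f(7)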
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+// The following code is internal implementation detail.  See the comment at
+// the top of this file for the API documentation.
+
+namespace absl {
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE.  The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename R, typename C, typename... Params, typename Obj,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Obj, Args...>
+      : std::is_base_of<C, Obj> {};
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...), Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename R, typename C, typename... Params, typename Ptr,
+            typename... Args>
+  struct AcceptImpl<R (C::*)(Params...) const, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj> : std::is_base_of<C, Obj> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_
diff --git a/absl/base/internal/log_severity.cc b/absl/base/internal/log_severity.cc
new file mode 100644
index 000000000..430ac4584
--- /dev/null
+++ b/absl/base/internal/log_severity.cc
@@ -0,0 +1,15 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/log_severity.h"
diff --git a/absl/base/internal/log_severity.h b/absl/base/internal/log_severity.h
new file mode 100644
index 000000000..deaf6a578
--- /dev/null
+++ b/absl/base/internal/log_severity.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  return s == absl::LogSeverity::kInfo
+             ? "INFO"
+             : s == absl::LogSeverity::kWarning
+                   ? "WARNING"
+                   : s == absl::LogSeverity::kError
+                         ? "ERROR"
+                         : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
+}
+
+// Note that out-of-range large severities normalize to kError, not kFatal.
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  return s < absl::LogSeverity::kInfo
+             ? absl::LogSeverity::kInfo
+             : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
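+
+// For example:
+//
+//   absl::LogSeverityName(absl::LogSeverity::kWarning);  // "WARNING"
+//   absl::NormalizeLogSeverity(7);   // kError: above-range values clamp here
+//   absl::NormalizeLogSeverity(-1);  // kInfo: below-range values clamp here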
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
new file mode 100644
index 000000000..c55201b38
--- /dev/null
+++ b/absl/base/internal/low_level_alloc.cc
@@ -0,0 +1,598 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/config.h"
+
+#include "absl/base/internal/low_level_alloc.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory.  Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new>                   // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/malloc_hook.h"
+#include "absl/base/internal/malloc_hook_invoke.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// MAP_ANONYMOUS
+#if defined(__APPLE__)
+// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
+// deprecated.  In Darwin, MAP_ANON is all there is.
+#if !defined MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif  // !MAP_ANONYMOUS
+#endif  // __APPLE__
+
+namespace absl {
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field.  Must be
+    // first.  Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements.  The AllocList node may not have room
+  // for all kMaxLevel entries.  See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation.  This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // => log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
+
+// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size.  Compute level=log2(size / base)+n
+// where n is 1 if random is false and otherwise a random number generated with
+// the standard distribution for a skiplist:  See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes.  "level" is clipped so
+// level < kMaxLevel and next[level-1] will fit in the node.
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size.  We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head,
+                                     AllocList *e, AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;   // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+struct LowLevelAlloc::Arena {
+  // This constructor does nothing, and relies on zero-initialization to get
+  // the proper initial state.
+  Arena() : mu(base_internal::kLinkerInitialized) {}  // NOLINT
+  explicit Arena(int)  // NOLINT(readability/casting)
+      :  // Avoid recursive cooperative scheduling w/ kernel scheduling.
+        mu(base_internal::SCHEDULE_KERNEL_ONLY),
+        // Set pagesize to zero explicitly for non-static init.
+        pagesize(0),
+        random(0) {}
+
+  base_internal::SpinLock mu;   // protects freelist, allocation_count,
+                                // pagesize, roundup, min_size
+  AllocList freelist;           // head of free list; sorted by addr (under mu)
+  int32_t allocation_count;     // count of allocated blocks (under mu)
+  std::atomic<uint32_t> flags;  // flags passed to NewArena (ro after init)
+  size_t pagesize;              // ==getpagesize()  (init under mu, then ro)
+  size_t roundup;               // lowest 2^n >= max(16,sizeof (AllocList))
+                                // (init under mu, then ro)
+  size_t min_size;              // smallest allocation block size
+                                // (init under mu, then ro)
+  uint32_t random;              // PRNG state
+};
+
+// The default arena, which is used when 0 is passed instead of an Arena
+// pointer.
+static struct LowLevelAlloc::Arena default_arena;  // NOLINT
+
+// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
+// do not want malloc hook reporting, so that for them there's no malloc hook
+// reporting even during arena creation.
+static struct LowLevelAlloc::Arena unhooked_arena;  // NOLINT
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;  // NOLINT
+#endif
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (arena == &unhooked_async_sig_safe_arena ||
+        (arena->flags.load(std::memory_order_relaxed) &
+         LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+// Initialize the fields of an Arena
+static void ArenaInit(LowLevelAlloc::Arena *arena) {
+  if (arena->pagesize == 0) {
+#ifdef _WIN32
+    SYSTEM_INFO system_info;
+    GetSystemInfo(&system_info);
+    arena->pagesize = std::max(system_info.dwPageSize,
+                               system_info.dwAllocationGranularity);
+#else
+    arena->pagesize = getpagesize();
+#endif
+    // Round up block sizes to a power of two close to the header size.
+    arena->roundup = 16;
+    while (arena->roundup < sizeof (arena->freelist.header)) {
+      arena->roundup += arena->roundup;
+    }
+    // Don't allocate blocks less than twice the roundup size to avoid tiny
+    // free blocks.
+    arena->min_size = 2 * arena->roundup;
+    arena->freelist.header.size = 0;
+    arena->freelist.header.magic =
+        Magic(kMagicUnallocated, &arena->freelist.header);
+    arena->freelist.header.arena = arena;
+    arena->freelist.levels = 0;
+    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
+    arena->allocation_count = 0;
+    if (arena == &default_arena) {
+      // Default arena should be hooked, e.g. for heap-checker to trace
+      // pointer chains through objects in the default arena.
+      arena->flags.store(LowLevelAlloc::kCallMallocHook,
+                         std::memory_order_relaxed);
+    }
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    else if (arena ==  // NOLINT(readability/braces)
+             &unhooked_async_sig_safe_arena) {
+      arena->flags.store(LowLevelAlloc::kAsyncSignalSafe,
+                         std::memory_order_relaxed);
+    }
+#endif
+    else {  // NOLINT(readability/braces)
+      // other arenas' flags may be overridden by client,
+      // but unhooked_arena will have 0 in 'flags'.
+      arena->flags.store(0, std::memory_order_relaxed);
+    }
+  }
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags,
+                                              Arena *meta_data_arena) {
+  ABSL_RAW_CHECK(meta_data_arena != nullptr, "must pass a valid arena");
+  if (meta_data_arena == &default_arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      meta_data_arena = &unhooked_async_sig_safe_arena;
+    } else  // NOLINT(readability/braces)
+#endif
+        if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+      meta_data_arena = &unhooked_arena;
+    }
+  }
+  // Arena(0) uses the constructor for non-static contexts
+  Arena *result =
+      new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
+  ArenaInit(result);
+  result->flags.store(flags, std::memory_order_relaxed);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != &default_arena && arena != &unhooked_arena,
+      "may not delete default arena");
+  ArenaLock section(arena);
+  bool empty = (arena->allocation_count == 0);
+  section.Leave();
+  if (empty) {
+    while (arena->freelist.next[0] != nullptr) {
+      AllocList *region = arena->freelist.next[0];
+      size_t size = region->header.size;
+      arena->freelist.next[0] = region->next[0];
+      ABSL_RAW_CHECK(
+          region->header.magic == Magic(kMagicUnallocated, &region->header),
+          "bad magic number in DeleteArena()");
+      ABSL_RAW_CHECK(region->header.arena == arena,
+                     "bad arena pointer in DeleteArena()");
+      ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                     "empty arena has non-page-aligned block size");
+      ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                     "empty arena has non-page-aligned block");
+      int munmap_result;
+#ifdef _WIN32
+      munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+      ABSL_RAW_CHECK(munmap_result != 0,
+                     "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+      if ((arena->flags.load(std::memory_order_relaxed) &
+           LowLevelAlloc::kAsyncSignalSafe) == 0) {
+        munmap_result = munmap(region, size);
+      } else {
+        munmap_result = MallocHook::UnhookedMUnmap(region, size);
+      }
+      if (munmap_result != 0) {
+        ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                     errno);
+      }
+#endif
+    }
+    Free(arena);
+  }
+  return empty;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow.  The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L < arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
+                                   &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(
+      reinterpret_cast<char *>(v) - sizeof (f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
+                                 &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);        // maybe coalesce with successor
+  Coalesce(prev[0]);  // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(
+        reinterpret_cast<char *>(v) - sizeof (f->header));
+    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                   "bad magic number in Free()");
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+      MallocHook::InvokeDeleteHook(v);
+    }
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;       // will point to region that satisfies request
+    ArenaLock section(arena);
+    ArenaInit(arena);
+    // round up with header
+    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
+                             arena->roundup);
+    for (;;) {      // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {        // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {  // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(0, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+      if ((arena->flags.load(std::memory_order_relaxed) &
+           LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+#endif
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n = reinterpret_cast<AllocList *>
+                        (req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, &default_arena);
+  if ((default_arena.flags.load(std::memory_order_relaxed) &
+       kCallMallocHook) != 0) {
+    // this call must be directly in the user-called allocator function
+    // for MallocHook::GetCallerStackTrace to work properly
+    MallocHook::InvokeNewHook(result, request);
+  }
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  if ((arena->flags.load(std::memory_order_relaxed) & kCallMallocHook) != 0) {
+    // this call must be directly in the user-called allocator function
+    // for MallocHook::GetCallerStackTrace to work properly
+    MallocHook::InvokeNewHook(result, request);
+  }
+  return result;
+}
+
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  return &default_arena;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h
new file mode 100644
index 000000000..d6eb813f0
--- /dev/null
+++ b/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,120 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory.  Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstdint>
+
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;  // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc(), the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().  Does nothing if passed 0.  "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
+  // are to put all callers of MallocHook::Invoke* in this module
+  // into special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be ored into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe.  Not set in
+    // DefaultArena().  Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+
+    // When used with DefaultArena(), the NewArena() and DeleteArena() calls
+    // obey the flags given explicitly in the NewArena() call, even if those
+    // flags differ from the settings in DefaultArena().  So the call
+    // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
+    // as well as generating an arena that provides async-signal-safe
+    // Alloc/Free.
+  };
+  static Arena *NewArena(int32_t flags, Arena *meta_data_arena);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
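+
+  // A minimal usage sketch (illustrative only):
+  //
+  //   using absl::base_internal::LowLevelAlloc;
+  //   LowLevelAlloc::Arena *arena =
+  //       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
+  //   void *p = LowLevelAlloc::AllocWithArena(64, arena);
+  //   LowLevelAlloc::Free(p);
+  //   LowLevelAlloc::DeleteArena(arena);  // returns true: no blocks remain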
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();  // no instances
+};
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 000000000..5f60c3d9d
--- /dev/null
+++ b/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,203 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+
+#include "absl/base/internal/malloc_hook.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;  // pointer to memory
+  int len;    // number of bytes
+  int fill;   // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Used to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef std::unordered_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = 0;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags, LowLevelAlloc::DefaultArena());
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {  // toss a coin
+      case 0:              // coin came up heads: add a block
+        using_low_level_alloc = true;
+        block_desc.len = rand() & 0x3fff;
+        block_desc.ptr = reinterpret_cast<char *>(
+            arena == 0 ? LowLevelAlloc::Alloc(block_desc.len)
+                       : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+        using_low_level_alloc = false;
+        RandomizeBlockDesc(&block_desc);
+        rnd = rand();
+        it = allocated.find(rnd);
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          it->second = block_desc;
+        } else {
+          allocated[rnd] = block_desc;
+        }
+        break;
+      case 1:  // coin came up tails: remove a block
+        it = allocated.begin();
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          allocated.erase(it);
+        }
+        break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// used for counting allocates and frees
+static int32_t allocates;
+static int32_t frees;
+
+// ignore uses of the allocator not triggered by our test
+static std::thread::id* test_tid;
+
+// called on each alloc if kCallMallocHook specified
+static void AllocHook(const void *p, size_t size) {
+  if (using_low_level_alloc) {
+    if (*test_tid == std::this_thread::get_id()) {
+      allocates++;
+    }
+  }
+}
+
+// called on each free if kCallMallocHook specified
+static void FreeHook(const void *p) {
+  if (using_low_level_alloc) {
+    if (*test_tid == std::this_thread::get_id()) {
+      frees++;
+    }
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    test_tid = new std::thread::id(std::this_thread::get_id());
+    TEST_ASSERT(MallocHook::AddNewHook(&AllocHook));
+    TEST_ASSERT(MallocHook::AddDeleteHook(&FreeHook));
+    TEST_ASSERT(allocates == 0);
+    TEST_ASSERT(frees == 0);
+    Test(false, false, 50000);
+    TEST_ASSERT(allocates != 0);  // default arena calls hooks
+    TEST_ASSERT(frees != 0);
+    for (int i = 0; i != 16; i++) {
+      bool call_hooks = ((i & 1) == 1);
+      allocates = 0;
+      frees = 0;
+      Test(true, call_hooks, 15000);
+      if (call_hooks) {
+        TEST_ASSERT(allocates > 5000);  // arena calls hooks
+        TEST_ASSERT(frees > 5000);
+      } else {
+        TEST_ASSERT(allocates == 0);  // arena doesn't call hooks
+        TEST_ASSERT(frees == 0);
+      }
+    }
+    TEST_ASSERT(MallocHook::RemoveNewHook(&AllocHook));
+    TEST_ASSERT(MallocHook::RemoveDeleteHook(&FreeHook));
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+  return 0;
+}
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 000000000..b9501076e
--- /dev/null
+++ b/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,104 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level //base interfaces such
+// as SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage.  These callbacks allow libc internals,
+// such as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+namespace base_internal {
+
+class SpinLock;          // To allow use of SchedulingGuard.
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative
+// rescheduling of the calling thread within specific program blocks.  This is
+// used to protect resources (e.g. low-level SpinLocks or Domain code) that
+// cooperative scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableRescheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
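+
+  // For example (an illustrative sketch of intended use by a friend class):
+  //
+  //   {
+  //     SchedulingGuard::ScopedDisable disable;
+  //     ...  // cooperative rescheduling is disabled in this region
+  //   }      // rescheduling is re-enabled when "disable" is destroyed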
+
+  // Access to SchedulingGuard is explicitly white-listed.
+  friend class SchedulingHelper;
+  friend class SpinLock;
+
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/absl/base/internal/malloc_extension.cc b/absl/base/internal/malloc_extension.cc
new file mode 100644
index 000000000..daf4bc32e
--- /dev/null
+++ b/absl/base/internal/malloc_extension.cc
@@ -0,0 +1,197 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/malloc_extension.h"
+
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <atomic>
+#include <string>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/malloc_extension_c.h"
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+// SysAllocator implementation
+SysAllocator::~SysAllocator() {}
+void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
+
+// Default implementation -- does nothing
+MallocExtension::~MallocExtension() { }
+bool MallocExtension::VerifyAllMemory() { return true; }
+bool MallocExtension::VerifyNewMemory(const void*) { return true; }
+bool MallocExtension::VerifyArrayNewMemory(const void*) { return true; }
+bool MallocExtension::VerifyMallocMemory(const void*) { return true; }
+
+bool MallocExtension::GetNumericProperty(const char*, size_t*) {
+  return false;
+}
+
+bool MallocExtension::SetNumericProperty(const char*, size_t) {
+  return false;
+}
+
+void MallocExtension::GetStats(char* buffer, int length) {
+  assert(length > 0);
+  static_cast<void>(length);
+  buffer[0] = '\0';
+}
+
+bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
+                                        int histogram[kMallocHistogramSize]) {
+  *blocks = 0;
+  *total = 0;
+  memset(histogram, 0, sizeof(*histogram) * kMallocHistogramSize);
+  return true;
+}
+
+void MallocExtension::MarkThreadIdle() {
+  // Default implementation does nothing
+}
+
+void MallocExtension::MarkThreadBusy() {
+  // Default implementation does nothing
+}
+
+SysAllocator* MallocExtension::GetSystemAllocator() {
+  return nullptr;
+}
+
+void MallocExtension::SetSystemAllocator(SysAllocator*) {
+  // Default implementation does nothing
+}
+
+void MallocExtension::ReleaseToSystem(size_t) {
+  // Default implementation does nothing
+}
+
+void MallocExtension::ReleaseFreeMemory() {
+  ReleaseToSystem(static_cast<size_t>(-1));  // SIZE_T_MAX
+}
+
+void MallocExtension::SetMemoryReleaseRate(double) {
+  // Default implementation does nothing
+}
+double MallocExtension::GetMemoryReleaseRate() {
+  return -1.0;
+}
+
+size_t MallocExtension::GetEstimatedAllocatedSize(size_t size) {
+  return size;
+}
+
+size_t MallocExtension::GetAllocatedSize(const void* p) {
+  assert(GetOwnership(p) != kNotOwned);
+  static_cast<void>(p);
+  return 0;
+}
+
+MallocExtension::Ownership MallocExtension::GetOwnership(const void*) {
+  return kUnknownOwnership;
+}
+
+void MallocExtension::GetProperties(MallocExtension::StatLevel,
+                                    std::map<std::string, Property>* result) {
+  result->clear();
+}
+
+size_t MallocExtension::ReleaseCPUMemory(int) {
+  return 0;
+}
+
+// The current malloc extension object.
+
+std::atomic<MallocExtension*> MallocExtension::current_instance_;
+
+MallocExtension* MallocExtension::InitModule() {
+  MallocExtension* ext = new MallocExtension;
+  current_instance_.store(ext, std::memory_order_release);
+  return ext;
+}
+
+void MallocExtension::Register(MallocExtension* implementation) {
+  InitModuleOnce();
+  // When running under valgrind, our custom malloc is replaced with
+  // valgrind's one and malloc extensions will not work.  (Note:
+  // callers should be responsible for checking that they are the
+  // malloc that is really being run, before calling Register.  This
+  // is just here as an extra sanity check.)
+  // Under compiler-based ThreadSanitizer RunningOnValgrind() returns true,
+  // but we still want to use malloc extensions.
+#ifndef THREAD_SANITIZER
+  if (RunningOnValgrind()) {
+    return;
+  }
+#endif  // #ifndef THREAD_SANITIZER
+  current_instance_.store(implementation, std::memory_order_release);
+}
+
+void MallocExtension::GetHeapSample(MallocExtensionWriter*) {}
+
+void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter*) {}
+
+void MallocExtension::GetFragmentationProfile(MallocExtensionWriter*) {}
+
+}  // namespace base_internal
+}  // namespace absl
+
+// These are C shims that work on the current instance.
+
+#define C_SHIM(fn, retval, paramlist, arglist)                           \
+  extern "C" retval MallocExtension_##fn paramlist {                     \
+    return absl::base_internal::MallocExtension::instance()->fn arglist; \
+  }
+
+C_SHIM(VerifyAllMemory, int, (void), ());
+C_SHIM(VerifyNewMemory, int, (const void* p), (p));
+C_SHIM(VerifyArrayNewMemory, int, (const void* p), (p));
+C_SHIM(VerifyMallocMemory, int, (const void* p), (p));
+C_SHIM(
+    MallocMemoryStats, int,
+    (int* blocks, size_t* total,
+     int histogram[absl::base_internal::MallocExtension::kMallocHistogramSize]),
+    (blocks, total, histogram));
+
+C_SHIM(GetStats, void,
+       (char* buffer, int buffer_length), (buffer, buffer_length));
+C_SHIM(GetNumericProperty, int,
+       (const char* property, size_t* value), (property, value));
+C_SHIM(SetNumericProperty, int,
+       (const char* property, size_t value), (property, value));
+
+C_SHIM(MarkThreadIdle, void, (void), ());
+C_SHIM(MarkThreadBusy, void, (void), ());
+C_SHIM(ReleaseFreeMemory, void, (void), ());
+C_SHIM(ReleaseToSystem, void, (size_t num_bytes), (num_bytes));
+C_SHIM(GetEstimatedAllocatedSize, size_t, (size_t size), (size));
+C_SHIM(GetAllocatedSize, size_t, (const void* p), (p));
+
+// Can't use the shim here because of the need to translate the enums.
+extern "C"
+MallocExtension_Ownership MallocExtension_GetOwnership(const void* p) {
+  return static_cast<MallocExtension_Ownership>(
+      absl::base_internal::MallocExtension::instance()->GetOwnership(p));
+}
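+
+// For example, C code might query a property through these shims roughly as
+// follows (an illustrative sketch; the property may be unsupported by the
+// linked-in malloc):
+//
+//   size_t used;
+//   if (MallocExtension_GetNumericProperty(
+//           "generic.current_allocated_bytes", &used)) {
+//     /* "used" now holds the number of bytes allocated by the app. */
+//   }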
+// Default implementation just returns size.  The expectation is that
+// the linked-in malloc implementation might provide an override of
+// this weak function with a better implementation.
+ABSL_ATTRIBUTE_WEAK ABSL_ATTRIBUTE_NOINLINE size_t nallocx(size_t size, int) {
+  return size;
+}
diff --git a/absl/base/internal/malloc_extension.h b/absl/base/internal/malloc_extension.h
new file mode 100644
index 000000000..dee4116b2
--- /dev/null
+++ b/absl/base/internal/malloc_extension.h
@@ -0,0 +1,424 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Extra extensions exported by some malloc implementations.  These
+// extensions are accessed through a virtual base class so an
+// application can link against a malloc that does not implement these
+// extensions, and it will get default versions that do nothing.
+//
+// NOTE FOR C USERS: If you wish to use this functionality from within
+// a C program, see malloc_extension_c.h.
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
+#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+namespace absl {
+namespace base_internal {
+
+class MallocExtensionWriter;
+
+// Interface to a pluggable system allocator.
+class SysAllocator {
+ public:
+  SysAllocator() {
+  }
+  virtual ~SysAllocator();
+
+  // Allocates "size" bytes of memory from the system, aligned to "alignment".
+  // Returns null if it fails.  Otherwise, the returned pointer p up to and
+  // including (p + actual_size - 1) have been allocated.
+  virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
+
+  // Get a human-readable description of the current state of the
+  // allocator.  The state is stored as a null-terminated std::string in
+  // a prefix of buffer.
+  virtual void GetStats(char* buffer, int length);
+};
+
+// The default implementations of the following routines do nothing.
+// All implementations should be thread-safe; the current ones
+// (DebugMallocImplementation and TCMallocImplementation) are.
+class MallocExtension {
+ public:
+  virtual ~MallocExtension();
+
+  // Verifies that all blocks are valid.  Returns true if all are; dumps
+  // core otherwise.  A no-op except in debug mode.  Even in debug mode,
+  // they may not do any checking except with certain malloc
+  // implementations.  Thread-safe.
+  virtual bool VerifyAllMemory();
+
+  // Verifies that p was returned by new, has not been deleted, and is
+  // valid.  Returns true if p is good; dumps core otherwise.  A no-op
+  // except in debug mode.  Even in debug mode, may not do any checking
+  // except with certain malloc implementations.  Thread-safe.
+  virtual bool VerifyNewMemory(const void* p);
+
+  // Verifies that p was returned by new[], has not been deleted, and is
+  // valid.  Returns true if p is good; dumps core otherwise.  A no-op
+  // except in debug mode.  Even in debug mode, may not do any checking
+  // except with certain malloc implementations.  Thread-safe.
+  virtual bool VerifyArrayNewMemory(const void* p);
+
+  // Verifies that p was returned by malloc, has not been freed, and is valid.
Returns true if p is good; dumps core otherwise. A no-op + // except in debug mode. Even in debug mode, may not do any checking + // except with certain malloc implementations. Thread-safe. + virtual bool VerifyMallocMemory(const void* p); + + // If statistics collection is enabled, sets *blocks to be the number of + // currently allocated blocks, sets *total to be the total size allocated + // over all blocks, sets histogram[n] to be the number of blocks with + // size between 2^n-1 and 2^(n+1), and returns true. Returns false, and + // does not change *blocks, *total, or *histogram, if statistics + // collection is disabled. + // + // Note that these statistics reflect memory allocated by new, new[], + // malloc(), and realloc(), but not mmap(). They may be larger (if not + // all pages have been written to) or smaller (if pages have been + // allocated by mmap()) than the total RSS size. They will always be + // smaller than the total virtual memory size. + static constexpr int kMallocHistogramSize = 64; + virtual bool MallocMemoryStats(int* blocks, size_t* total, + int histogram[kMallocHistogramSize]); + + // Get a human readable description of the current state of the malloc + // data structures. The state is stored as a null-terminated std::string + // in a prefix of "buffer[0,buffer_length-1]". + // REQUIRES: buffer_length > 0. + virtual void GetStats(char* buffer, int buffer_length); + + // Outputs to "writer" a sample of live objects and the stack traces + // that allocated these objects. The output can be passed to pprof. + virtual void GetHeapSample(MallocExtensionWriter* writer); + + // Outputs to "writer" the stack traces that caused growth in the + // address space size. The output can be passed to "pprof". + virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer); + + // Outputs to "writer" a fragmentation profile. The output can be + // passed to "pprof". In particular, the result is a list of + // tuples that says that "total" bytes in "n" + // objects are currently unusable because of fragmentation caused by + // an allocation with the specified "stacktrace". + virtual void GetFragmentationProfile(MallocExtensionWriter* writer); + + // ------------------------------------------------------------------- + // Control operations for getting and setting malloc implementation + // specific parameters. Some currently useful properties: + // + // generic + // ------- + // "generic.current_allocated_bytes" + // Number of bytes currently allocated by application + // This property is not writable. + // + // "generic.heap_size" + // Number of bytes in the heap == + // current_allocated_bytes + + // fragmentation + + // freed memory regions + // This property is not writable. + // + // tcmalloc + // -------- + // "tcmalloc.max_total_thread_cache_bytes" + // Upper limit on total number of bytes stored across all + // per-thread caches. Default: 16MB. + // + // "tcmalloc.current_total_thread_cache_bytes" + // Number of bytes used across all thread caches. + // This property is not writable. + // + // "tcmalloc.pageheap_free_bytes" + // Number of bytes in free, mapped pages in page heap. These + // bytes can be used to fulfill allocation requests. They + // always count towards virtual memory usage, and unless the + // underlying memory is swapped out by the OS, they also count + // towards physical memory usage. This property is not writable. + // + // "tcmalloc.pageheap_unmapped_bytes" + // Number of bytes in free, unmapped pages in page heap. 
+  //      These are bytes that have been released back to the OS,
+  //      possibly by one of the MallocExtension "Release" calls.
+  //      They can be used to fulfill allocation requests, but
+  //      typically incur a page fault.  They always count towards
+  //      virtual memory usage, and depending on the OS, typically
+  //      do not count towards physical memory usage.  This property
+  //      is not writable.
+  //
+  //  "tcmalloc.per_cpu_caches_active"
+  //      Whether tcmalloc is using per-CPU caches (1 or 0 respectively).
+  //      This property is not writable.
+  // -------------------------------------------------------------------
+
+  // Get the named "property"'s value.  Returns true if the property
+  // is known.  Returns false if the property is not a valid property
+  // name for the current malloc implementation.
+  // REQUIRES: property != null; value != null
+  virtual bool GetNumericProperty(const char* property, size_t* value);
+
+  // Set the named "property"'s value.  Returns true if the property
+  // is known and writable.  Returns false if the property is not a
+  // valid property name for the current malloc implementation, or
+  // is not writable.
+  // REQUIRES: property != null
+  virtual bool SetNumericProperty(const char* property, size_t value);
+
+  // Mark the current thread as "idle".  This routine may optionally
+  // be called by threads as a hint to the malloc implementation that
+  // any thread-specific resources should be released.  Note: this may
+  // be an expensive routine, so it should not be called too often.
+  //
+  // Also, if the code that calls this routine will go to sleep for
+  // a while, it should take care to not allocate anything between
+  // the call to this routine and the beginning of the sleep.
+  //
+  // Most malloc implementations ignore this routine.
+  virtual void MarkThreadIdle();
+
+  // Mark the current thread as "busy".  This routine should be
+  // called after MarkThreadIdle() if the thread will now do more
+  // work.  If this method is not called, performance may suffer.
+  //
+  // Most malloc implementations ignore this routine.
+  virtual void MarkThreadBusy();
+
+  // Attempt to free any resources associated with "cpu" (in the sense
+  // of only being usable from that CPU).  Returns the number of bytes
+  // previously assigned to "cpu" that were freed.  Safe to call from
+  // any processor, not just "cpu".
+  //
+  // Most malloc implementations ignore this routine (known exceptions:
+  // tcmalloc with --tcmalloc_per_cpu_caches=true).
+  virtual size_t ReleaseCPUMemory(int cpu);
+
+  // Gets the system allocator used by the malloc extension instance.  Returns
+  // null for malloc implementations that do not support pluggable system
+  // allocators.
+  virtual SysAllocator* GetSystemAllocator();
+
+  // Sets the system allocator to the specified one.
+  //
+  // Users could register their own system allocators for a malloc
+  // implementation that supports pluggable system allocators, such as
+  // TCMalloc, by doing:
+  //   alloc = new MyOwnSysAllocator();
+  //   MallocExtension::instance()->SetSystemAllocator(alloc);
+  // It's up to users whether to fall back (recommended) to the default
+  // system allocator (use GetSystemAllocator() above) or not.  The caller is
+  // responsible for any necessary locking.
+  // See tcmalloc/system-alloc.h for the interface and
+  // tcmalloc/memfs_malloc.cc for the examples.
+  //
+  // It's a no-op for malloc implementations that do not support pluggable
+  // system allocators.
+ virtual void SetSystemAllocator(SysAllocator *a); + + // Try to release num_bytes of free memory back to the operating + // system for reuse. Use this extension with caution -- to get this + // memory back may require faulting pages back in by the OS, and + // that may be slow. (Currently only implemented in tcmalloc.) + virtual void ReleaseToSystem(size_t num_bytes); + + // Same as ReleaseToSystem() but release as much memory as possible. + virtual void ReleaseFreeMemory(); + + // Sets the rate at which we release unused memory to the system. + // Zero means we never release memory back to the system. Increase + // this flag to return memory faster; decrease it to return memory + // slower. Reasonable rates are in the range [0,10]. (Currently + // only implemented in tcmalloc). + virtual void SetMemoryReleaseRate(double rate); + + // Gets the release rate. Returns a value < 0 if unknown. + virtual double GetMemoryReleaseRate(); + + // Returns the estimated number of bytes that will be allocated for + // a request of "size" bytes. This is an estimate: an allocation of + // SIZE bytes may reserve more bytes, but will never reserve less. + // (Currently only implemented in tcmalloc, other implementations + // always return SIZE.) + // This is equivalent to malloc_good_size() in OS X. + virtual size_t GetEstimatedAllocatedSize(size_t size); + + // Returns the actual number N of bytes reserved by tcmalloc for the + // pointer p. This number may be equal to or greater than the + // number of bytes requested when p was allocated. + // + // This routine is just useful for statistics collection. The + // client must *not* read or write from the extra bytes that are + // indicated by this call. + // + // Example, suppose the client gets memory by calling + // p = malloc(10) + // and GetAllocatedSize(p) returns 16. The client must only use the + // first 10 bytes p[0..9], and not attempt to read or write p[10..15]. + // + // p must have been allocated by this malloc implementation, must + // not be an interior pointer -- that is, must be exactly the + // pointer returned to by malloc() et al., not some offset from that + // -- and should not have been freed yet. p may be null. + // (Currently only implemented in tcmalloc; other implementations + // will return 0.) + virtual size_t GetAllocatedSize(const void* p); + + // Returns kOwned if this malloc implementation allocated the memory + // pointed to by p, or kNotOwned if some other malloc implementation + // allocated it or p is null. May also return kUnknownOwnership if + // the malloc implementation does not keep track of ownership. + // REQUIRES: p must be a value returned from a previous call to + // malloc(), calloc(), realloc(), memalign(), posix_memalign(), + // valloc(), pvalloc(), new, or new[], and must refer to memory that + // is currently allocated (so, for instance, you should not pass in + // a pointer after having called free() on it). + enum Ownership { + // NOTE: Enum values MUST be kept in sync with the version in + // malloc_extension_c.h + kUnknownOwnership = 0, + kOwned, + kNotOwned + }; + virtual Ownership GetOwnership(const void* p); + + // The current malloc implementation. Always non-null. + static MallocExtension* instance() { + InitModuleOnce(); + return current_instance_.load(std::memory_order_acquire); + } + + // Change the malloc implementation. Typically called by the + // malloc implementation during initialization. + static void Register(MallocExtension* implementation); + + // Type used by GetProperties. 
See comment on GetProperties.
+  struct Property {
+    size_t value;
+    // Stores breakdown of the property value bucketed by object size.
+    struct Bucket {
+      size_t min_object_size;
+      size_t max_object_size;
+      size_t size;
+    };
+    // Empty unless detailed info was asked for and this type has buckets
+    std::vector<Bucket> buckets;
+  };
+
+  // Type used by GetProperties.  See comment on GetProperties.
+  enum StatLevel { kSummary, kDetailed };
+
+  // Stores in *result detailed statistics about the malloc
+  // implementation.  *result will be a map keyed by the name of
+  // the statistic.  Each statistic has at least a "value" field.
+  //
+  // Some statistics may also contain an array of buckets if
+  // level==kDetailed and the "value" can be subdivided
+  // into different buckets for different object sizes.  If
+  // such detailed statistics are not available, Property::buckets
+  // will be empty.  Otherwise Property::buckets will contain
+  // potentially many entries.  For each bucket b, b.value
+  // will count the value contributed by objects in the range
+  // [b.min_object_size, b.max_object_size].
+  //
+  // Common across malloc implementations:
+  //  generic.bytes_in_use_by_app  -- Bytes currently in use by application
+  //  generic.physical_memory_used -- Overall (including malloc internals)
+  //  generic.virtual_memory_used  -- Overall (including malloc internals)
+  //
+  // Tcmalloc specific properties
+  //  tcmalloc.cpu_free            -- Bytes in per-cpu free-lists
+  //  tcmalloc.thread_cache_free   -- Bytes in per-thread free-lists
+  //  tcmalloc.transfer_cache      -- Bytes in cross-thread transfer caches
+  //  tcmalloc.central_cache_free  -- Bytes in central cache
+  //  tcmalloc.page_heap_free      -- Bytes in page heap
+  //  tcmalloc.page_heap_unmapped  -- Bytes in page heap (no backing phys. mem)
+  //  tcmalloc.metadata_bytes      -- Used by internal data structures
+  //  tcmalloc.thread_cache_count  -- Number of thread caches in use
+  //
+  // Debug allocator
+  //  debug.free_queue             -- Recently freed objects
+  virtual void GetProperties(StatLevel level,
+                             std::map<std::string, Property>* result);
+
+ private:
+  static MallocExtension* InitModule();
+
+  static void InitModuleOnce() {
+    // Pointer stored here so heap leak checker will consider the default
+    // instance reachable, even if current_instance_ is later overridden by
+    // MallocExtension::Register().
+    ABSL_ATTRIBUTE_UNUSED static MallocExtension* default_instance =
+        InitModule();
+  }
+
+  static std::atomic<MallocExtension*> current_instance_;
+};
+
+// Base class that can handle output generated by GetHeapSample() and
+// GetHeapGrowthStacks().  Use the available subclass or roll your
+// own.  Useful if you want explicit control over the type of output
+// buffer used (e.g. IOBuffer, Cord, etc.)
+class MallocExtensionWriter {
+ public:
+  virtual ~MallocExtensionWriter() {}
+  virtual void Write(const char* buf, int len) = 0;
+ protected:
+  MallocExtensionWriter() {}
+  MallocExtensionWriter(const MallocExtensionWriter&) = delete;
+  MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
+};
+
+// A subclass that writes to the std::string "out".  NOTE: The generated
+// data is *appended* to "*out".  I.e., the old contents of "*out" are
+// preserved.
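+//
+// For example, a caller might collect a heap sample into a std::string
+// (an illustrative sketch):
+//
+//   std::string profile;
+//   StringMallocExtensionWriter writer(&profile);
+//   MallocExtension::instance()->GetHeapSample(&writer);
+//   // "profile" can now be passed to pprof.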
+class StringMallocExtensionWriter : public MallocExtensionWriter {
+ public:
+  explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
+  virtual void Write(const char* buf, int len) {
+    out_->append(buf, len);
+  }
+
+ private:
+  std::string* const out_;
+  StringMallocExtensionWriter(const StringMallocExtensionWriter&) = delete;
+  StringMallocExtensionWriter& operator=(const StringMallocExtensionWriter&) =
+      delete;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+// The nallocx function allocates no memory, but it performs the same size
+// computation as the malloc function, and returns the real size of the
+// allocation that would result from the equivalent malloc function call.
+// The default weak implementation returns size unchanged, but tcmalloc
+// overrides it and returns the rounded-up size.  See the following link for
+// details:
+// http://www.unix.com/man-page/freebsd/3/nallocx/
+extern "C" size_t nallocx(size_t size, int flags);
+
+#ifndef MALLOCX_LG_ALIGN
+#define MALLOCX_LG_ALIGN(la) (la)
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
diff --git a/absl/base/internal/malloc_extension_c.h b/absl/base/internal/malloc_extension_c.h
new file mode 100644
index 000000000..5afc84a43
--- /dev/null
+++ b/absl/base/internal/malloc_extension_c.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+ * C shims for the C++ malloc_extension.h.  See malloc_extension.h for
+ * details.  Note these C shims always work on
+ * MallocExtension::instance(); it is not possible to have more than
+ * one MallocExtension object in C applications.
+ */
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
+#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define kMallocExtensionHistogramSize 64
+
+int MallocExtension_VerifyAllMemory(void);
+int MallocExtension_VerifyNewMemory(const void* p);
+int MallocExtension_VerifyArrayNewMemory(const void* p);
+int MallocExtension_VerifyMallocMemory(const void* p);
+int MallocExtension_MallocMemoryStats(
+    int* blocks, size_t* total,
+    int histogram[kMallocExtensionHistogramSize]);
+
+void MallocExtension_GetStats(char* buffer, int buffer_length);
+
+/* TODO(csilvers): write a C version of these routines, that perhaps
+ * takes a function ptr and a void *.
+ */
+/* void MallocExtension_GetHeapSample(MallocExtensionWriter* result); */
+/* void MallocExtension_GetHeapGrowthStacks(MallocExtensionWriter* result); */
+
+int MallocExtension_GetNumericProperty(const char* property, size_t* value);
+int MallocExtension_SetNumericProperty(const char* property, size_t value);
+void MallocExtension_MarkThreadIdle(void);
+void MallocExtension_MarkThreadBusy(void);
+void MallocExtension_ReleaseToSystem(size_t num_bytes);
+void MallocExtension_ReleaseFreeMemory(void);
+size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
+size_t MallocExtension_GetAllocatedSize(const void* p);
+
+/*
+ * NOTE: These enum values MUST be kept in sync with the version in
+ * malloc_extension.h
+ */
+typedef enum {
+  MallocExtension_kUnknownOwnership = 0,
+  MallocExtension_kOwned,
+  MallocExtension_kNotOwned
+} MallocExtension_Ownership;
+
+MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
+
+#ifdef __cplusplus
+}  /* extern "C" */
+#endif
+
+#endif /* ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ */
diff --git a/absl/base/internal/malloc_extension_test.cc b/absl/base/internal/malloc_extension_test.cc
new file mode 100644
index 000000000..246875a03
--- /dev/null
+++ b/absl/base/internal/malloc_extension_test.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <algorithm>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/malloc_extension.h"
+#include "absl/base/internal/malloc_extension_c.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+TEST(MallocExtension, MallocExtension) {
+  void* a = malloc(1000);
+
+  size_t cxx_bytes_used, c_bytes_used;
+  if (!MallocExtension::instance()->GetNumericProperty(
+          "generic.current_allocated_bytes", &cxx_bytes_used)) {
+    EXPECT_TRUE(ABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION);
+  } else {
+    ASSERT_TRUE(MallocExtension::instance()->GetNumericProperty(
+        "generic.current_allocated_bytes", &cxx_bytes_used));
+    ASSERT_TRUE(MallocExtension_GetNumericProperty(
+        "generic.current_allocated_bytes", &c_bytes_used));
+#ifndef MEMORY_SANITIZER
+    EXPECT_GT(cxx_bytes_used, 1000);
+    EXPECT_GT(c_bytes_used, 1000);
+#endif
+
+    EXPECT_TRUE(MallocExtension::instance()->VerifyAllMemory());
+    EXPECT_TRUE(MallocExtension_VerifyAllMemory());
+
+    EXPECT_EQ(MallocExtension::kOwned,
+              MallocExtension::instance()->GetOwnership(a));
+    // TODO(csilvers): this relies on undocumented behavior that
+    // GetOwnership works on stack-allocated variables.  Use a better test.
+    EXPECT_EQ(MallocExtension::kNotOwned,
+              MallocExtension::instance()->GetOwnership(&cxx_bytes_used));
+    EXPECT_EQ(MallocExtension::kNotOwned,
+              MallocExtension::instance()->GetOwnership(nullptr));
+    EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000);
+    // This is just a sanity check.  If we allocated too much, tcmalloc is
+    // broken.
+    EXPECT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000);
+    EXPECT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000),
+              1000);
+    for (int i = 0; i < 10; ++i) {
+      void* p = malloc(i);
+      EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(p),
+                MallocExtension::instance()->GetEstimatedAllocatedSize(i));
+      free(p);
+    }
+
+    // Check the c-shim version too.
+    EXPECT_EQ(MallocExtension_kOwned, MallocExtension_GetOwnership(a));
+    EXPECT_EQ(MallocExtension_kNotOwned,
+              MallocExtension_GetOwnership(&cxx_bytes_used));
+    EXPECT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(nullptr));
+    EXPECT_GE(MallocExtension_GetAllocatedSize(a), 1000);
+    EXPECT_LE(MallocExtension_GetAllocatedSize(a), 5000);
+    EXPECT_GE(MallocExtension_GetEstimatedAllocatedSize(1000), 1000);
+  }
+
+  free(a);
+}
+
+// Verify that the .cc file and .h file have the same enum values.
+TEST(GetOwnership, EnumValuesEqualForCAndCXX) {
+  EXPECT_EQ(static_cast<int>(MallocExtension::kUnknownOwnership),
+            static_cast<int>(MallocExtension_kUnknownOwnership));
+  EXPECT_EQ(static_cast<int>(MallocExtension::kOwned),
+            static_cast<int>(MallocExtension_kOwned));
+  EXPECT_EQ(static_cast<int>(MallocExtension::kNotOwned),
+            static_cast<int>(MallocExtension_kNotOwned));
+}
+
+TEST(nallocx, SaneBehavior) {
+  for (size_t size = 0; size < 64 * 1024; ++size) {
+    size_t alloc_size = nallocx(size, 0);
+    EXPECT_LE(size, alloc_size) << "size is " << size;
+    EXPECT_LE(alloc_size, std::max(size + 100, 2 * size)) << "size is " << size;
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/malloc_hook.cc b/absl/base/internal/malloc_hook.cc
new file mode 100644
index 000000000..d5d227a5f
--- /dev/null
+++ b/absl/base/internal/malloc_hook.cc
@@ -0,0 +1,611 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+
+#if ABSL_HAVE_MMAP
+// Disable the glibc prototype of mremap(), as older versions of the
+// system headers define this function with only four arguments,
+// whereas newer versions allow an optional fifth argument:
+#define mremap glibc_mremap
+#include <sys/mman.h>
+#undef mremap
+#endif
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/call_once.h"
+#include "absl/base/casts.h"
+#include "absl/base/internal/malloc_hook.h"
+#include "absl/base/internal/malloc_hook_invoke.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// __THROW is defined in glibc systems.  It means, counter-intuitively,
+// "This function will never throw an exception."  It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW    // I guess we're not on a glibc system
+# define __THROW   // __THROW is just an optimization, so ok to make it ""
+#endif
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+void RemoveInitialHooksAndCallInitializers();  // below.
+
+absl::once_flag once;
+
+// These hooks are installed in MallocHook as the only initial hooks.  The
+// first hook that is called will run RemoveInitialHooksAndCallInitializers
+// (see the definition below) and then redispatch to any malloc hooks installed
+// by RemoveInitialHooksAndCallInitializers.
+//
+// Note(llib): there is a possibility of a race in the event that there are
+// multiple threads running before the first allocation.  This is pretty
+// difficult to achieve, but if it is then multiple threads may concurrently do
+// allocations.  The first caller will call
+// RemoveInitialHooksAndCallInitializers via one of the initial hooks.  A
+// concurrent allocation may, depending on timing, either:
+// * still have its initial malloc hook installed, run that and block waiting
+//   for the first caller to finish its call to
+//   RemoveInitialHooksAndCallInitializers, and proceed normally.
+// * occur some time during the RemoveInitialHooksAndCallInitializers call, at
+//   which point there could be no initial hooks and the subsequent hooks that
+//   are about to be set up by RemoveInitialHooksAndCallInitializers haven't
+//   been installed yet.  I think the worst we can get is that some allocations
+//   will not get reported to some hooks set by the initializers called from
+//   RemoveInitialHooksAndCallInitializers.
+
+void InitialNewHook(const void* ptr, size_t size) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokeNewHook(ptr, size);
+}
+
+void InitialPreMMapHook(const void* start,
+                        size_t size,
+                        int protection,
+                        int flags,
+                        int fd,
+                        off_t offset) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokePreMmapHook(start, size, protection, flags, fd, offset);
+}
+
+void InitialPreSbrkHook(ptrdiff_t increment) {
+  absl::call_once(once, RemoveInitialHooksAndCallInitializers);
+  MallocHook::InvokePreSbrkHook(increment);
+}
+
+// This function is called at most once by one of the above initial malloc
+// hooks.  It removes all initial hooks and initializes all other clients that
+// want to get control at the very first memory allocation.  The initializers
+// may assume that the initial malloc hooks have been removed.  The
+// initializers may set up malloc hooks and allocate memory.
+void RemoveInitialHooksAndCallInitializers() {
+  ABSL_RAW_CHECK(MallocHook::RemoveNewHook(&InitialNewHook), "");
+  ABSL_RAW_CHECK(MallocHook::RemovePreMmapHook(&InitialPreMMapHook), "");
+  ABSL_RAW_CHECK(MallocHook::RemovePreSbrkHook(&InitialPreSbrkHook), "");
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+
+namespace absl {
+namespace base_internal {
+
+// This lock is shared between all implementations of HookList::Add & Remove.
+// The potential for contention is very small.  This needs to be a SpinLock
+// and not a Mutex since it's possible for Mutex locking to allocate memory
+// (e.g., per-thread allocation in debug builds), which could cause infinite
+// recursion.
+static absl::base_internal::SpinLock hooklist_spinlock(
+    absl::base_internal::kLinkerInitialized);
+
+template <typename T>
+bool HookList<T>::Add(T value_as_t) {
+  if (value_as_t == T()) {
+    return false;
+  }
+  absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
+  // Find the first slot in data that is 0.
+  int index = 0;
+  while ((index < kHookListMaxValues) &&
+         (priv_data[index].load(std::memory_order_relaxed) != 0)) {
+    ++index;
+  }
+  if (index == kHookListMaxValues) {
+    return false;
+  }
+  int prev_num_hooks = priv_end.load(std::memory_order_acquire);
+  priv_data[index].store(reinterpret_cast<intptr_t>(value_as_t),
+                         std::memory_order_release);
+  if (prev_num_hooks <= index) {
+    priv_end.store(index + 1, std::memory_order_release);
+  }
+  return true;
+}
+
+template <typename T>
+bool HookList<T>::Remove(T value_as_t) {
+  if (value_as_t == T()) {
+    return false;
+  }
+  absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
+  int hooks_end = priv_end.load(std::memory_order_acquire);
+  int index = 0;
+  while (index < hooks_end &&
+         value_as_t != reinterpret_cast<T>(
+             priv_data[index].load(std::memory_order_acquire))) {
+    ++index;
+  }
+  if (index == hooks_end) {
+    return false;
+  }
+  priv_data[index].store(0, std::memory_order_release);
+  if (hooks_end == index + 1) {
+    // Adjust hooks_end down to the lowest possible value.
+    hooks_end = index;
+    while ((hooks_end > 0) &&
+           (priv_data[hooks_end - 1].load(std::memory_order_acquire) == 0)) {
+      --hooks_end;
+    }
+    priv_end.store(hooks_end, std::memory_order_release);
+  }
+  return true;
+}
+
+template <typename T>
+int HookList<T>::Traverse(T* output_array, int n) const {
+  int hooks_end = priv_end.load(std::memory_order_acquire);
+  int actual_hooks_end = 0;
+  for (int i = 0; i < hooks_end && n > 0; ++i) {
+    T data = reinterpret_cast<T>(priv_data[i].load(std::memory_order_acquire));
+    if (data != T()) {
+      *output_array++ = data;
+      ++actual_hooks_end;
+      --n;
+    }
+  }
+  return actual_hooks_end;
+}
+
+// Initialize a HookList (optionally with the given initial_value in index 0).
+#define INIT_HOOK_LIST { {0}, {{}} }
+#define INIT_HOOK_LIST_WITH_VALUE(initial_value) \
+  { {1}, { {reinterpret_cast<intptr_t>(initial_value)} } }
+
+// Explicit instantiation for malloc_hook_test.cc.  This ensures all the
+// methods are instantiated.
+template struct HookList<MallocHook::NewHook>;
+
+HookList<MallocHook::NewHook> new_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(&InitialNewHook);
+HookList<MallocHook::DeleteHook> delete_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::SampledNewHook> sampled_new_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::PreMmapHook> premmap_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(&InitialPreMMapHook);
+HookList<MallocHook::MmapHook> mmap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::MunmapHook> munmap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::MremapHook> mremap_hooks_ = INIT_HOOK_LIST;
+HookList<MallocHook::PreSbrkHook> presbrk_hooks_ =
+    INIT_HOOK_LIST_WITH_VALUE(InitialPreSbrkHook);
+HookList<MallocHook::SbrkHook> sbrk_hooks_ = INIT_HOOK_LIST;
+
+// These lists contain either 0 or 1 hooks.
+HookList<MallocHook::MmapReplacement> mmap_replacement_ = INIT_HOOK_LIST;
+HookList<MallocHook::MunmapReplacement> munmap_replacement_ = INIT_HOOK_LIST;
+
+#undef INIT_HOOK_LIST_WITH_VALUE
+#undef INIT_HOOK_LIST
+
+}  // namespace base_internal
+}  // namespace absl
+
+// These are available as C bindings as well as C++, hence their
+// definition outside the MallocHook class.
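+// For example, a C client might install and later remove a new-hook as
+// follows (an illustrative sketch; MyNewHook is hypothetical):
+//
+//   void MyNewHook(const void* ptr, size_t size) {
+//     /* must be thread-safe and must not itself allocate */
+//   }
+//   ...
+//   MallocHook_AddNewHook(&MyNewHook);
+//   ...
+//   MallocHook_RemoveNewHook(&MyNewHook);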
+extern "C" +int MallocHook_AddNewHook(MallocHook_NewHook hook) { + return absl::base_internal::new_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveNewHook(MallocHook_NewHook hook) { + return absl::base_internal::new_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook) { + return absl::base_internal::delete_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook) { + return absl::base_internal::delete_hooks_.Remove(hook); +} + +extern "C" int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook) { + return absl::base_internal::sampled_new_hooks_.Add(hook); +} + +extern "C" int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook) { + return absl::base_internal::sampled_new_hooks_.Remove(hook); +} + +extern "C" int MallocHook_AddSampledDeleteHook( + MallocHook_SampledDeleteHook hook) { + return absl::base_internal::sampled_delete_hooks_.Add(hook); +} + +extern "C" int MallocHook_RemoveSampledDeleteHook( + MallocHook_SampledDeleteHook hook) { + return absl::base_internal::sampled_delete_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook) { + return absl::base_internal::premmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook) { + return absl::base_internal::premmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook) { + // NOTE this is a best effort CHECK. Concurrent sets could succeed since + // this test is outside of the Add spin lock. + ABSL_RAW_CHECK(absl::base_internal::mmap_replacement_.empty(), + "Only one MMapReplacement is allowed."); + return absl::base_internal::mmap_replacement_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook) { + return absl::base_internal::mmap_replacement_.Remove(hook); +} + +extern "C" +int MallocHook_AddMmapHook(MallocHook_MmapHook hook) { + return absl::base_internal::mmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook) { + return absl::base_internal::mmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook) { + return absl::base_internal::munmap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook) { + return absl::base_internal::munmap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook) { + // NOTE this is a best effort CHECK. Concurrent sets could succeed since + // this test is outside of the Add spin lock. 
+ ABSL_RAW_CHECK(absl::base_internal::munmap_replacement_.empty(), + "Only one MunmapReplacement is allowed."); + return absl::base_internal::munmap_replacement_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook) { + return absl::base_internal::munmap_replacement_.Remove(hook); +} + +extern "C" +int MallocHook_AddMremapHook(MallocHook_MremapHook hook) { + return absl::base_internal::mremap_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook) { + return absl::base_internal::mremap_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook) { + return absl::base_internal::presbrk_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook) { + return absl::base_internal::presbrk_hooks_.Remove(hook); +} + +extern "C" +int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook) { + return absl::base_internal::sbrk_hooks_.Add(hook); +} + +extern "C" +int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook) { + return absl::base_internal::sbrk_hooks_.Remove(hook); +} + +namespace absl { +namespace base_internal { + +// Note: embedding the function calls inside the traversal of HookList would be +// very confusing, as it is legal for a hook to remove itself and add other +// hooks. Doing traversal first, and then calling the hooks ensures we only +// call the hooks registered at the start. +#define INVOKE_HOOKS(HookType, hook_list, args) \ + do { \ + HookType hooks[kHookListMaxValues]; \ + int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \ + for (int i = 0; i < num_hooks; ++i) { \ + (*hooks[i]) args; \ + } \ + } while (0) + +// There should only be one replacement. Return the result of the first +// one, or false if there is none. 
+#define INVOKE_REPLACEMENT(HookType, hook_list, args)              \
+  do {                                                             \
+    HookType hooks[kHookListMaxValues];                            \
+    int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
+    return (num_hooks > 0 && (*hooks[0])args);                     \
+  } while (0)
+
+void MallocHook::InvokeNewHookSlow(const void* ptr, size_t size) {
+  INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size));
+}
+
+void MallocHook::InvokeDeleteHookSlow(const void* ptr) {
+  INVOKE_HOOKS(DeleteHook, delete_hooks_, (ptr));
+}
+
+void MallocHook::InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) {
+  INVOKE_HOOKS(SampledNewHook, sampled_new_hooks_, (sampled_alloc));
+}
+
+void MallocHook::InvokeSampledDeleteHookSlow(AllocHandle handle) {
+  INVOKE_HOOKS(SampledDeleteHook, sampled_delete_hooks_, (handle));
+}
+
+void MallocHook::InvokePreMmapHookSlow(const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset) {
+  INVOKE_HOOKS(PreMmapHook, premmap_hooks_, (start, size, protection, flags,
+                                             fd, offset));
+}
+
+void MallocHook::InvokeMmapHookSlow(const void* result,
+                                    const void* start,
+                                    size_t size,
+                                    int protection,
+                                    int flags,
+                                    int fd,
+                                    off_t offset) {
+  INVOKE_HOOKS(MmapHook, mmap_hooks_, (result, start, size, protection, flags,
+                                       fd, offset));
+}
+
+bool MallocHook::InvokeMmapReplacementSlow(const void* start,
+                                           size_t size,
+                                           int protection,
+                                           int flags,
+                                           int fd,
+                                           off_t offset,
+                                           void** result) {
+  INVOKE_REPLACEMENT(MmapReplacement, mmap_replacement_,
+                     (start, size, protection, flags, fd, offset, result));
+}
+
+void MallocHook::InvokeMunmapHookSlow(const void* start, size_t size) {
+  INVOKE_HOOKS(MunmapHook, munmap_hooks_, (start, size));
+}
+
+bool MallocHook::InvokeMunmapReplacementSlow(const void* start,
+                                             size_t size,
+                                             int* result) {
+  INVOKE_REPLACEMENT(MunmapReplacement, munmap_replacement_,
+                     (start, size, result));
+}
+
+void MallocHook::InvokeMremapHookSlow(const void* result,
+                                      const void* old_addr,
+                                      size_t old_size,
+                                      size_t new_size,
+                                      int flags,
+                                      const void* new_addr) {
+  INVOKE_HOOKS(MremapHook, mremap_hooks_, (result, old_addr, old_size,
+                                           new_size, flags, new_addr));
+}
+
+void MallocHook::InvokePreSbrkHookSlow(ptrdiff_t increment) {
+  INVOKE_HOOKS(PreSbrkHook, presbrk_hooks_, (increment));
+}
+
+void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) {
+  INVOKE_HOOKS(SbrkHook, sbrk_hooks_, (result, increment));
+}
+
+#undef INVOKE_HOOKS
+#undef INVOKE_REPLACEMENT
+
+}  // namespace base_internal
+}  // namespace absl
+
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
+// actual functions are in debugallocation.cc or tcmalloc.cc
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook);
+// actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc
+ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(blink_malloc);
+ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(blink_malloc);
+// actual functions are in third_party/blink_headless/.../{PartitionAlloc,
+// FastMalloc}.cpp.
+
+#define ADDR_IN_ATTRIBUTE_SECTION(addr, name)                         \
+  (reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_START(name)) <= \
+       reinterpret_cast<uintptr_t>(addr) &&                           \
+   reinterpret_cast<uintptr_t>(addr) <                                \
+       reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_STOP(name)))
+
+// Return true iff 'caller' is a return address within a function
+// that calls one of our hooks via MallocHook::Invoke*.
+// A helper for GetCallerStackTrace.
+static inline bool InHookCaller(const void* caller) { + return ADDR_IN_ATTRIBUTE_SECTION(caller, google_malloc) || + ADDR_IN_ATTRIBUTE_SECTION(caller, malloc_hook) || + ADDR_IN_ATTRIBUTE_SECTION(caller, blink_malloc); + // We can use one section for everything except tcmalloc_or_debug + // due to its special linkage mode, which prevents merging of the sections. +} + +#undef ADDR_IN_ATTRIBUTE_SECTION + +static absl::once_flag in_hook_caller_once; + +static void InitializeInHookCaller() { + ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc); + if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) == + ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) { + ABSL_RAW_LOG(ERROR, + "google_malloc section is missing, " + "thus InHookCaller is broken!"); + } + ABSL_INIT_ATTRIBUTE_SECTION_VARS(malloc_hook); + if (ABSL_ATTRIBUTE_SECTION_START(malloc_hook) == + ABSL_ATTRIBUTE_SECTION_STOP(malloc_hook)) { + ABSL_RAW_LOG(ERROR, + "malloc_hook section is missing, " + "thus InHookCaller is broken!"); + } + ABSL_INIT_ATTRIBUTE_SECTION_VARS(blink_malloc); + // The blink_malloc section is only expected to be present in binaries + // linking against the blink rendering engine in third_party/blink_headless. +} + +// We can improve behavior/compactness of this function +// if we pass a generic test function (with a generic arg) +// into the implementations for get_stack_trace_fn instead of the skip_count. +extern "C" int MallocHook_GetCallerStackTrace( + void** result, int max_depth, int skip_count, + MallocHook_GetStackTraceFn get_stack_trace_fn) { + if (!ABSL_HAVE_ATTRIBUTE_SECTION) { + // Fall back to get_stack_trace_fn and good old but fragile frame skip + // counts. + // Note: this path is inaccurate when a hook is not called directly by an + // allocation function but is daisy-chained through another hook, + // search for MallocHook::(Get|Set|Invoke)* to find such cases. +#ifdef NDEBUG + return get_stack_trace_fn(result, max_depth, skip_count); +#else + return get_stack_trace_fn(result, max_depth, skip_count + 1); +#endif + // due to -foptimize-sibling-calls in opt mode + // there's no need for extra frame skip here then + } + absl::call_once(in_hook_caller_once, InitializeInHookCaller); + // MallocHook caller determination via InHookCaller works, use it: + static const int kMaxSkip = 32 + 6 + 3; + // Constant tuned to do just one get_stack_trace_fn call below in practice + // and not get many frames that we don't actually need: + // currently max passed max_depth is 32, + // max passed/needed skip_count is 6 + // and 3 is to account for some hook daisy chaining. + static const int kStackSize = kMaxSkip + 1; + void* stack[kStackSize]; + int depth = + get_stack_trace_fn(stack, kStackSize, 1); // skip this function frame + if (depth == 0) + // silently propagate cases when get_stack_trace_fn does not work + return 0; + for (int i = depth - 1; i >= 0; --i) { // stack[0] is our immediate caller + if (InHookCaller(stack[i])) { + i += 1; // skip hook caller frame + depth -= i; // correct depth + if (depth > max_depth) depth = max_depth; + std::copy(stack + i, stack + i + depth, result); + if (depth < max_depth && depth + i == kStackSize) { + // get frames for the missing depth + depth += get_stack_trace_fn(result + depth, max_depth - depth, + 1 + kStackSize); + } + return depth; + } + } + ABSL_RAW_LOG(WARNING, + "Hooked allocator frame not found, returning empty trace"); + // If this happens try increasing kMaxSkip + // or else something must be wrong with InHookCaller, + // e.g. 
for every section used in InHookCaller + // all functions in that section must be inside the same library. + return 0; +} + +// On systems where we know how, we override mmap/munmap/mremap/sbrk +// to provide support for calling the related hooks (in addition, +// of course, to doing what these functions normally do). + +// The ABSL_MALLOC_HOOK_MMAP_DISABLE macro disables mmap/munmap interceptors. +// Dynamic tools that intercept mmap/munmap can't be linked together with +// malloc_hook interceptors. We disable the malloc_hook interceptors for the +// widely-used dynamic tools, i.e. ThreadSanitizer and MemorySanitizer, but +// still allow users to disable this in special cases that can't be easily +// detected during compilation, via -DABSL_MALLOC_HOOK_MMAP_DISABLE or #define +// ABSL_MALLOC_HOOK_MMAP_DISABLE. +// TODO(b/62370839): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and +// msan config; Replace MALLOC_HOOK_MMAP_DISABLE with +// ABSL_MALLOC_HOOK_MMAP_DISABLE for other special cases. +#if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && \ + !defined(ABSL_MALLOC_HOOK_MMAP_DISABLE) && defined(__linux__) +#include "absl/base/internal/malloc_hook_mmap_linux.inc" + +#elif ABSL_HAVE_MMAP + +namespace absl { +namespace base_internal { + +// static +void* MallocHook::UnhookedMMap(void* start, size_t size, int protection, + int flags, int fd, off_t offset) { + void* result; + if (!MallocHook::InvokeMmapReplacement( + start, size, protection, flags, fd, offset, &result)) { + result = mmap(start, size, protection, flags, fd, offset); + } + return result; +} + +// static +int MallocHook::UnhookedMUnmap(void* start, size_t size) { + int result; + if (!MallocHook::InvokeMunmapReplacement(start, size, &result)) { + result = munmap(start, size); + } + return result; +} + +} // namespace base_internal +} // namespace absl + +#endif diff --git a/absl/base/internal/malloc_hook.h b/absl/base/internal/malloc_hook.h new file mode 100644 index 000000000..ed5cf2e65 --- /dev/null +++ b/absl/base/internal/malloc_hook.h @@ -0,0 +1,333 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Some of our malloc implementations can invoke the following hooks whenever +// memory is allocated or deallocated. MallocHook is thread-safe, and things +// you do before calling AddFooHook(MyHook) are visible to any resulting calls +// to MyHook. Hooks must be thread-safe. If you write: +// +// CHECK(MallocHook::AddNewHook(&MyNewHook)); +// +// MyNewHook will be invoked in subsequent calls in the current thread, but +// there are no guarantees on when it might be invoked in other threads. +// +// There are a limited number of slots available for each hook type. Add*Hook +// will return false if there are no slots available. Remove*Hook will return +// false if the given hook was not already installed. +// +// The order in which individual hooks are called in Invoke*Hook is undefined. 
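+//
+// A hook itself might look like this (MyNewHook is an illustrative name, not
+// part of this API):
+//
+//   void MyNewHook(const void* ptr, size_t size) {
+//     // Must be thread-safe and must not re-enter the hooked allocator.
+//   }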
+// +// It is safe for a hook to remove itself within Invoke*Hook and add other +// hooks. Any hooks added inside a hook invocation (for the same hook type) +// will not be invoked for the current invocation. +// +// One important user of these hooks is the heap profiler. +// +// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be +// directly in the code of the (de)allocation function that is provided to the +// user and that function must have an ABSL_ATTRIBUTE_SECTION(malloc_hook) +// attribute. +// +// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you +// need to invoke a hook (which you shouldn't unless you're part of tcmalloc), +// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h. +// +// NOTE FOR C USERS: If you want to use malloc_hook functionality from +// a C program, #include malloc_hook_c.h instead of this file. +// +// IWYU pragma: private, include "base/malloc_hook.h" + +#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ +#define ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/malloc_hook_c.h" +#include "absl/base/port.h" + +namespace absl { +namespace base_internal { + +// Note: malloc_hook_c.h defines MallocHook_*Hook and +// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook +// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h +// for details of these types/functions. + +class MallocHook { + public: + // The NewHook is invoked whenever an object is being allocated. + // Object pointer and size are passed in. + // It may be passed null pointer if the allocator returned null. + typedef MallocHook_NewHook NewHook; + inline static bool AddNewHook(NewHook hook) { + return MallocHook_AddNewHook(hook); + } + inline static bool RemoveNewHook(NewHook hook) { + return MallocHook_RemoveNewHook(hook); + } + inline static void InvokeNewHook(const void* ptr, size_t size); + + // The DeleteHook is invoked whenever an object is being deallocated. + // Object pointer is passed in. + // It may be passed null pointer if the caller is trying to delete null. + typedef MallocHook_DeleteHook DeleteHook; + inline static bool AddDeleteHook(DeleteHook hook) { + return MallocHook_AddDeleteHook(hook); + } + inline static bool RemoveDeleteHook(DeleteHook hook) { + return MallocHook_RemoveDeleteHook(hook); + } + inline static void InvokeDeleteHook(const void* ptr); + + // The SampledNewHook is invoked for some subset of object allocations + // according to the sampling policy of an allocator such as tcmalloc. + // SampledAlloc has the following fields: + // * AllocHandle handle: to be set to an effectively unique value (in this + // process) by allocator. + // * size_t allocated_size: space actually used by allocator to host + // the object. + // * int stack_depth and const void* stack: invocation stack for + // the allocation. + // The allocator invoking the hook should record the handle value and later + // call InvokeSampledDeleteHook() with that value. 
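+  // For example, a sampling profiler might pair the two sampled hooks like
+  // this (the sample table is illustrative, not part of this API):
+  //
+  //   void MySampledNewHook(const MallocHook::SampledAlloc* s) {
+  //     sample_table->Insert(s->handle, s->allocated_size);
+  //   }
+  //   void MySampledDeleteHook(MallocHook::AllocHandle handle) {
+  //     sample_table->Remove(handle);
+  //   }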
+  typedef MallocHook_SampledNewHook SampledNewHook;
+  typedef MallocHook_SampledAlloc SampledAlloc;
+  inline static bool AddSampledNewHook(SampledNewHook hook) {
+    return MallocHook_AddSampledNewHook(hook);
+  }
+  inline static bool RemoveSampledNewHook(SampledNewHook hook) {
+    return MallocHook_RemoveSampledNewHook(hook);
+  }
+  inline static void InvokeSampledNewHook(const SampledAlloc* sampled_alloc);
+
+  // The SampledDeleteHook is invoked whenever an object previously chosen
+  // by an allocator for sampling is being deallocated.
+  // The handle identifying the object --as previously chosen by
+  // InvokeSampledNewHook()-- is passed in.
+  typedef MallocHook_SampledDeleteHook SampledDeleteHook;
+  typedef MallocHook_AllocHandle AllocHandle;
+  inline static bool AddSampledDeleteHook(SampledDeleteHook hook) {
+    return MallocHook_AddSampledDeleteHook(hook);
+  }
+  inline static bool RemoveSampledDeleteHook(SampledDeleteHook hook) {
+    return MallocHook_RemoveSampledDeleteHook(hook);
+  }
+  inline static void InvokeSampledDeleteHook(AllocHandle handle);
+
+  // The PreMmapHook is invoked with mmap's or mmap64's arguments just
+  // before the mmap/mmap64 call is actually made. Such a hook may be useful
+  // in memory limited contexts, to catch allocations that will exceed
+  // a memory limit, and take outside actions to increase that limit.
+  typedef MallocHook_PreMmapHook PreMmapHook;
+  inline static bool AddPreMmapHook(PreMmapHook hook) {
+    return MallocHook_AddPreMmapHook(hook);
+  }
+  inline static bool RemovePreMmapHook(PreMmapHook hook) {
+    return MallocHook_RemovePreMmapHook(hook);
+  }
+  inline static void InvokePreMmapHook(const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset);
+
+  // The MmapReplacement is invoked with mmap's arguments and place to put the
+  // result into after the PreMmapHook but before the mmap/mmap64 call is
+  // actually made.
+  // The MmapReplacement should return true if it handled the call, or false
+  // if it is still necessary to call mmap/mmap64.
+  // This should be used only by experts, and users must be
+  // extremely careful to avoid recursive calls to mmap. The replacement
+  // should be async-signal-safe.
+  // Only one MmapReplacement is supported. After setting an MmapReplacement
+  // you must call RemoveMmapReplacement before calling SetMmapReplacement
+  // again.
+  typedef MallocHook_MmapReplacement MmapReplacement;
+  inline static bool SetMmapReplacement(MmapReplacement hook) {
+    return MallocHook_SetMmapReplacement(hook);
+  }
+  inline static bool RemoveMmapReplacement(MmapReplacement hook) {
+    return MallocHook_RemoveMmapReplacement(hook);
+  }
+  inline static bool InvokeMmapReplacement(const void* start,
+                                           size_t size,
+                                           int protection,
+                                           int flags,
+                                           int fd,
+                                           off_t offset,
+                                           void** result);
+
+
+  // The MmapHook is invoked with mmap's return value and arguments whenever
+  // a region of memory has been just mapped.
+  // It may be passed MAP_FAILED if the mmap failed.
+  typedef MallocHook_MmapHook MmapHook;
+  inline static bool AddMmapHook(MmapHook hook) {
+    return MallocHook_AddMmapHook(hook);
+  }
+  inline static bool RemoveMmapHook(MmapHook hook) {
+    return MallocHook_RemoveMmapHook(hook);
+  }
+  inline static void InvokeMmapHook(const void* result,
+                                    const void* start,
+                                    size_t size,
+                                    int protection,
+                                    int flags,
+                                    int fd,
+                                    off_t offset);
+
+  // The MunmapReplacement is invoked with munmap's arguments and place to put
+  // the result into just before the munmap call is actually made.
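+  // (*result is written only when the replacement handles the call.)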
+ // The MunmapReplacement should return true if it handled the call, or false + // if it is still necessary to call munmap. + // This should be used only by experts. The replacement should be + // async signal safe. + // Only one MunmapReplacement is supported. After setting an + // MunmapReplacement you must call RemoveMunmapReplacement before + // calling SetMunmapReplacement again. + typedef MallocHook_MunmapReplacement MunmapReplacement; + inline static bool SetMunmapReplacement(MunmapReplacement hook) { + return MallocHook_SetMunmapReplacement(hook); + } + inline static bool RemoveMunmapReplacement(MunmapReplacement hook) { + return MallocHook_RemoveMunmapReplacement(hook); + } + inline static bool InvokeMunmapReplacement(const void* start, + size_t size, + int* result); + + // The MunmapHook is invoked with munmap's arguments just before the munmap + // call is actually made. + // TODO(maxim): Rename this to PreMunmapHook for consistency with PreMmapHook + // and PreSbrkHook. + typedef MallocHook_MunmapHook MunmapHook; + inline static bool AddMunmapHook(MunmapHook hook) { + return MallocHook_AddMunmapHook(hook); + } + inline static bool RemoveMunmapHook(MunmapHook hook) { + return MallocHook_RemoveMunmapHook(hook); + } + inline static void InvokeMunmapHook(const void* start, size_t size); + + // The MremapHook is invoked with mremap's return value and arguments + // whenever a region of memory has been just remapped. + typedef MallocHook_MremapHook MremapHook; + inline static bool AddMremapHook(MremapHook hook) { + return MallocHook_AddMremapHook(hook); + } + inline static bool RemoveMremapHook(MremapHook hook) { + return MallocHook_RemoveMremapHook(hook); + } + inline static void InvokeMremapHook(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr); + + // The PreSbrkHook is invoked with sbrk's argument just before sbrk is called + // -- except when the increment is 0. This is because sbrk(0) is often called + // to get the top of the memory stack, and is not actually a + // memory-allocation call. It may be useful in memory-limited contexts, + // to catch allocations that will exceed the limit and take outside + // actions to increase such a limit. + typedef MallocHook_PreSbrkHook PreSbrkHook; + inline static bool AddPreSbrkHook(PreSbrkHook hook) { + return MallocHook_AddPreSbrkHook(hook); + } + inline static bool RemovePreSbrkHook(PreSbrkHook hook) { + return MallocHook_RemovePreSbrkHook(hook); + } + inline static void InvokePreSbrkHook(ptrdiff_t increment); + + // The SbrkHook is invoked with sbrk's result and argument whenever sbrk + // has just executed -- except when the increment is 0. + // This is because sbrk(0) is often called to get the top of the memory stack, + // and is not actually a memory-allocation call. + typedef MallocHook_SbrkHook SbrkHook; + inline static bool AddSbrkHook(SbrkHook hook) { + return MallocHook_AddSbrkHook(hook); + } + inline static bool RemoveSbrkHook(SbrkHook hook) { + return MallocHook_RemoveSbrkHook(hook); + } + inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment); + + // Pointer to a absl::GetStackTrace implementation, following the API in + // base/stacktrace.h. + using GetStackTraceFn = int (*)(void**, int, int); + + // Get the current stack trace. Try to skip all routines up to and + // including the caller of MallocHook::Invoke*. 
+ // Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h) + // as a hint about how many routines to skip if better information + // is not available. + // Stack trace is filled into *result up to the size of max_depth. + // The actual number of stack frames filled is returned. + inline static int GetCallerStackTrace(void** result, int max_depth, + int skip_count, + GetStackTraceFn get_stack_trace_fn) { + return MallocHook_GetCallerStackTrace(result, max_depth, skip_count, + get_stack_trace_fn); + } + +#if ABSL_HAVE_MMAP + // Unhooked versions of mmap() and munmap(). These should be used + // only by experts, since they bypass heapchecking, etc. + // Note: These do not run hooks, but they still use the MmapReplacement + // and MunmapReplacement. + static void* UnhookedMMap(void* start, size_t size, int protection, int flags, + int fd, off_t offset); + static int UnhookedMUnmap(void* start, size_t size); +#endif + + private: + // Slow path versions of Invoke*Hook. + static void InvokeNewHookSlow(const void* ptr, + size_t size) ABSL_ATTRIBUTE_COLD; + static void InvokeDeleteHookSlow(const void* ptr) ABSL_ATTRIBUTE_COLD; + static void InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) + ABSL_ATTRIBUTE_COLD; + static void InvokeSampledDeleteHookSlow(AllocHandle handle) + ABSL_ATTRIBUTE_COLD; + static void InvokePreMmapHookSlow(const void* start, size_t size, + int protection, int flags, int fd, + off_t offset) ABSL_ATTRIBUTE_COLD; + static void InvokeMmapHookSlow(const void* result, const void* start, + size_t size, int protection, int flags, int fd, + off_t offset) ABSL_ATTRIBUTE_COLD; + static bool InvokeMmapReplacementSlow(const void* start, size_t size, + int protection, int flags, int fd, + off_t offset, + void** result) ABSL_ATTRIBUTE_COLD; + static void InvokeMunmapHookSlow(const void* ptr, + size_t size) ABSL_ATTRIBUTE_COLD; + static bool InvokeMunmapReplacementSlow(const void* ptr, size_t size, + int* result) ABSL_ATTRIBUTE_COLD; + static void InvokeMremapHookSlow(const void* result, const void* old_addr, + size_t old_size, size_t new_size, int flags, + const void* new_addr) ABSL_ATTRIBUTE_COLD; + static void InvokePreSbrkHookSlow(ptrdiff_t increment) ABSL_ATTRIBUTE_COLD; + static void InvokeSbrkHookSlow(const void* result, + ptrdiff_t increment) ABSL_ATTRIBUTE_COLD; +}; + +} // namespace base_internal +} // namespace absl +#endif // ABSL_BASE_INTERNAL_MALLOC_HOOK_H_ diff --git a/absl/base/internal/malloc_hook_c.h b/absl/base/internal/malloc_hook_c.h new file mode 100644 index 000000000..03b84ca62 --- /dev/null +++ b/absl/base/internal/malloc_hook_c.h @@ -0,0 +1,131 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * C shims for the C++ malloc_hook.h. See malloc_hook.h for details + * on how to use these. 
+ */ +#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ +#define ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Get the current stack trace. Try to skip all routines up to and + * including the caller of MallocHook::Invoke*. + * Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h) + * as a hint about how many routines to skip if better information + * is not available. + */ +typedef int (*MallocHook_GetStackTraceFn)(void**, int, int); +int MallocHook_GetCallerStackTrace(void** result, int max_depth, int skip_count, + MallocHook_GetStackTraceFn fn); + +/* All the MallocHook_{Add,Remove}*Hook functions below return 1 on success + * and 0 on failure. + */ + +typedef void (*MallocHook_NewHook)(const void* ptr, size_t size); +int MallocHook_AddNewHook(MallocHook_NewHook hook); +int MallocHook_RemoveNewHook(MallocHook_NewHook hook); + +typedef void (*MallocHook_DeleteHook)(const void* ptr); +int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook); +int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook); + +typedef int64_t MallocHook_AllocHandle; +typedef struct { + /* See malloc_hook.h for documentation for this struct. */ + MallocHook_AllocHandle handle; + size_t allocated_size; + int stack_depth; + const void* stack; +} MallocHook_SampledAlloc; +typedef void (*MallocHook_SampledNewHook)( + const MallocHook_SampledAlloc* sampled_alloc); +int MallocHook_AddSampledNewHook(MallocHook_SampledNewHook hook); +int MallocHook_RemoveSampledNewHook(MallocHook_SampledNewHook hook); + +typedef void (*MallocHook_SampledDeleteHook)(MallocHook_AllocHandle handle); +int MallocHook_AddSampledDeleteHook(MallocHook_SampledDeleteHook hook); +int MallocHook_RemoveSampledDeleteHook(MallocHook_SampledDeleteHook hook); + +typedef void (*MallocHook_PreMmapHook)(const void *start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); +int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook); +int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook); + +typedef void (*MallocHook_MmapHook)(const void* result, + const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset); +int MallocHook_AddMmapHook(MallocHook_MmapHook hook); +int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook); + +typedef int (*MallocHook_MmapReplacement)(const void* start, + size_t size, + int protection, + int flags, + int fd, + off_t offset, + void** result); +int MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook); +int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook); + +typedef void (*MallocHook_MunmapHook)(const void* start, size_t size); +int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook); +int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook); + +typedef int (*MallocHook_MunmapReplacement)(const void* start, + size_t size, + int* result); +int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook); +int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook); + +typedef void (*MallocHook_MremapHook)(const void* result, + const void* old_addr, + size_t old_size, + size_t new_size, + int flags, + const void* new_addr); +int MallocHook_AddMremapHook(MallocHook_MremapHook hook); +int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook); + +typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment); +int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook); +int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook 
hook);
+
+typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);
+int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook);
+int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook);
+
+#ifdef __cplusplus
+}  /* extern "C" */
+#endif  /* __cplusplus */
+
+#endif  /* ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ */
diff --git a/absl/base/internal/malloc_hook_invoke.h b/absl/base/internal/malloc_hook_invoke.h
new file mode 100644
index 000000000..c08220cbc
--- /dev/null
+++ b/absl/base/internal/malloc_hook_invoke.h
@@ -0,0 +1,198 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This has the implementation details of malloc_hook that are needed
+// to use malloc_hook inside the tcmalloc system. It does not hold
+// any of the client-facing calls that are used to add new hooks.
+//
+// IWYU pragma: private, include "base/malloc_hook-inl.h"
+
+#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
+#define ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
+
+#include <atomic>
+#include <cstddef>
+#include <sys/types.h>
+
+#include "absl/base/internal/malloc_hook.h"
+
+namespace absl {
+namespace base_internal {
+
+// Maximum of 7 hooks means that HookList is 8 words.
+static constexpr int kHookListMaxValues = 7;
+
+// HookList: a class that provides synchronized insertions and removals and
+// lockless traversal. Most of the implementation is in malloc_hook.cc.
+template <typename T>
+struct HookList {
+  static_assert(sizeof(T) <= sizeof(intptr_t), "T_should_fit_in_intptr_t");
+
+  // Adds value to the list. Note that duplicates are allowed. Thread-safe and
+  // blocking (acquires hooklist_spinlock). Returns true on success; false
+  // otherwise (failures include invalid value and no space left).
+  bool Add(T value);
+
+  // Removes the first entry matching value from the list. Thread-safe and
+  // blocking (acquires hooklist_spinlock). Returns true on success; false
+  // otherwise (failures include invalid value and no value found).
+  bool Remove(T value);
+
+  // Store up to n values of the list in output_array, and return the number of
+  // elements stored. Thread-safe and non-blocking. This is fast (one memory
+  // access) if the list is empty.
+  int Traverse(T* output_array, int n) const;
+
+  // Fast inline implementation for fast path of Invoke*Hook.
+  bool empty() const {
+    // empty() is only used as an optimization to determine if we should call
+    // Traverse which has proper acquire loads. Memory reordering around a
+    // call to empty will either lead to an unnecessary Traverse call, or will
+    // miss invoking hooks, neither of which is a problem.
+    return priv_end.load(std::memory_order_relaxed) == 0;
+  }
+
+  // This internal data is not private so that the class is an aggregate and can
+  // be initialized by the linker. Don't access this directly. Use the
+  // INIT_HOOK_LIST macro in malloc_hook.cc.
+
+  // One more than the index of the last valid element in priv_data. During
+  // 'Remove' this may be past the last valid element in priv_data, but
+  // subsequent values will be 0.
+  std::atomic<intptr_t> priv_end;
+  std::atomic<intptr_t> priv_data[kHookListMaxValues];
+};
+
+extern template struct HookList<MallocHook_NewHook>;
+
+extern HookList<MallocHook_NewHook> new_hooks_;
+extern HookList<MallocHook_DeleteHook> delete_hooks_;
+extern HookList<MallocHook_SampledNewHook> sampled_new_hooks_;
+extern HookList<MallocHook_SampledDeleteHook> sampled_delete_hooks_;
+extern HookList<MallocHook_PreMmapHook> premmap_hooks_;
+extern HookList<MallocHook_MmapHook> mmap_hooks_;
+extern HookList<MallocHook_MmapReplacement> mmap_replacement_;
+extern HookList<MallocHook_MunmapHook> munmap_hooks_;
+extern HookList<MallocHook_MunmapReplacement> munmap_replacement_;
+extern HookList<MallocHook_MremapHook> mremap_hooks_;
+extern HookList<MallocHook_PreSbrkHook> presbrk_hooks_;
+extern HookList<MallocHook_SbrkHook> sbrk_hooks_;
+
+inline void MallocHook::InvokeNewHook(const void* ptr, size_t size) {
+  if (!absl::base_internal::new_hooks_.empty()) {
+    InvokeNewHookSlow(ptr, size);
+  }
+}
+
+inline void MallocHook::InvokeDeleteHook(const void* ptr) {
+  if (!absl::base_internal::delete_hooks_.empty()) {
+    InvokeDeleteHookSlow(ptr);
+  }
+}
+
+inline void MallocHook::InvokeSampledNewHook(
+    const SampledAlloc* sampled_alloc) {
+  if (!absl::base_internal::sampled_new_hooks_.empty()) {
+    InvokeSampledNewHookSlow(sampled_alloc);
+  }
+}
+
+inline void MallocHook::InvokeSampledDeleteHook(AllocHandle handle) {
+  if (!absl::base_internal::sampled_delete_hooks_.empty()) {
+    InvokeSampledDeleteHookSlow(handle);
+  }
+}
+
+inline void MallocHook::InvokePreMmapHook(const void* start,
+                                          size_t size,
+                                          int protection,
+                                          int flags,
+                                          int fd,
+                                          off_t offset) {
+  if (!absl::base_internal::premmap_hooks_.empty()) {
+    InvokePreMmapHookSlow(start, size, protection, flags, fd, offset);
+  }
+}
+
+inline void MallocHook::InvokeMmapHook(const void* result,
+                                       const void* start,
+                                       size_t size,
+                                       int protection,
+                                       int flags,
+                                       int fd,
+                                       off_t offset) {
+  if (!absl::base_internal::mmap_hooks_.empty()) {
+    InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset);
+  }
+}
+
+inline bool MallocHook::InvokeMmapReplacement(const void* start,
+                                              size_t size,
+                                              int protection,
+                                              int flags,
+                                              int fd,
+                                              off_t offset,
+                                              void** result) {
+  if (!absl::base_internal::mmap_replacement_.empty()) {
+    return InvokeMmapReplacementSlow(start, size,
+                                     protection, flags,
+                                     fd, offset,
+                                     result);
+  }
+  return false;
+}
+
+inline void MallocHook::InvokeMunmapHook(const void* start, size_t size) {
+  if (!absl::base_internal::munmap_hooks_.empty()) {
+    InvokeMunmapHookSlow(start, size);
+  }
+}
+
+inline bool MallocHook::InvokeMunmapReplacement(
+    const void* start, size_t size, int* result) {
+  if (!absl::base_internal::munmap_replacement_.empty()) {
+    return InvokeMunmapReplacementSlow(start, size, result);
+  }
+  return false;
+}
+
+inline void MallocHook::InvokeMremapHook(const void* result,
+                                         const void* old_addr,
+                                         size_t old_size,
+                                         size_t new_size,
+                                         int flags,
+                                         const void* new_addr) {
+  if (!absl::base_internal::mremap_hooks_.empty()) {
+    InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags, new_addr);
+  }
+}
+
+inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) {
+  if (!absl::base_internal::presbrk_hooks_.empty() && increment != 0) {
+    InvokePreSbrkHookSlow(increment);
+  }
+}
+
+inline void MallocHook::InvokeSbrkHook(const void* result,
+                                       ptrdiff_t increment) {
+  if (!absl::base_internal::sbrk_hooks_.empty() && increment != 0) {
+    InvokeSbrkHookSlow(result, increment);
+  }
+}
+
+}  // namespace base_internal
+}  // namespace absl
+#endif  // ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
diff --git a/absl/base/internal/malloc_hook_mmap_linux.inc b/absl/base/internal/malloc_hook_mmap_linux.inc
new file mode 100644
index
000000000..059ded576
--- /dev/null
+++ b/absl/base/internal/malloc_hook_mmap_linux.inc
@@ -0,0 +1,236 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// We define mmap() and mmap64(), which somewhat reimplement libc's mmap
+// syscall stubs. Unfortunately libc only exports the stubs via weak symbols
+// (which we're overriding with our mmap64() and mmap() wrappers) so we can't
+// just call through to them.
+
+#ifndef __linux__
+# error Should only be including malloc_hook_mmap_linux.h on linux systems.
+#endif
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#ifdef __BIONIC__
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif  // __BIONIC__
+#endif  // __mips__
+
+// SYS_mmap, SYS_munmap, and SYS_mremap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void *__mmap2(void *, size_t, int, int, int, long);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#ifndef SYS_mremap
+#define SYS_mremap __NR_mremap
+#endif
+#endif  // __BIONIC__
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+static inline void* do_mmap64(void* start, size_t length, int prot,
+                              int flags, int fd, off64_t offset) __THROW {
+#if defined(__i386__) ||                                  \
+    defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) ||   \
+    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
+    (defined(__PPC__) && !defined(__PPC64__)) ||          \
+    (defined(__s390__) && !defined(__s390x__))
+  // On these architectures, implement mmap with mmap2.
+  static int pagesize = 0;
+  if (pagesize == 0) {
+    pagesize = getpagesize();
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<off_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  uintptr_t buf[6] = {
+      reinterpret_cast<uintptr_t>(start), static_cast<uintptr_t>(length),
+      static_cast<uintptr_t>(prot),       static_cast<uintptr_t>(flags),
+      static_cast<uintptr_t>(fd),         static_cast<uintptr_t>(offset)};
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+  // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+  // We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+  // sign extension. We can't cast pointers directly because those are
+  // 32 bits, and gcc will dump ugly warnings about casting from a pointer
+  // to an integer of a different size. We also need to make sure __off64_t
+  // isn't truncated to 32-bits under x32.
+  #define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+  #undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+// We use do_mmap64 abstraction to put MallocHook::InvokeMmapHook
+// calls right into mmap and mmap64, so that the stack frames in the caller's
+// stack are at the same offsets for all the calls of memory allocating
+// functions.
+
+// Put all callers of MallocHook::Invoke* in this module into
+// malloc_hook section,
+// so that MallocHook::GetCallerStackTrace can function accurately:
+
+// Make sure mmap doesn't get #define'd away by <sys/mman.h>
+# undef mmap
+
+extern "C" {
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap64(void* start, size_t length, int prot, int flags, int fd,
+             off64_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mmap(void* start, size_t length, int prot, int flags, int fd,
+           off_t offset) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+int munmap(void* start, size_t length) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook)
+void* mremap(void* old_addr, size_t old_size, size_t new_size, int flags,
+             ...) __THROW;
+ABSL_ATTRIBUTE_SECTION(malloc_hook) void* sbrk(ptrdiff_t increment) __THROW;
+}
+
+extern "C" void* mmap64(void *start, size_t length, int prot, int flags,
+                        int fd, off64_t offset) __THROW {
+  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
+                                                     fd, offset);
+  void *result;
+  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd, offset);
+  }
+  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
+                                                  flags, fd, offset);
+  return result;
+}
+
+# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
+
+extern "C" void* mmap(void *start, size_t length, int prot, int flags,
+                      int fd, off_t offset) __THROW {
+  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
+                                                     fd, offset);
+  void *result;
+  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd,
+                       static_cast<size_t>(offset));  // avoid sign extension
+  }
+  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
+                                                  flags, fd, offset);
+  return result;
+}
+
+# endif  // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)
+
+extern "C" int munmap(void* start, size_t length) __THROW {
+  absl::base_internal::MallocHook::InvokeMunmapHook(start, length);
+  int result;
+  if (!absl::base_internal::MallocHook::InvokeMunmapReplacement(start, length,
+                                                                &result)) {
+    result = syscall(SYS_munmap, start, length);
+  }
+  return result;
+}
+
+extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size,
+                        int flags, ...)
__THROW {
+  va_list ap;
+  va_start(ap, flags);
+  void *new_address = va_arg(ap, void *);
+  va_end(ap);
+  void* result = reinterpret_cast<void*>(
+      syscall(SYS_mremap, old_addr, old_size, new_size, flags, new_address));
+  absl::base_internal::MallocHook::InvokeMremapHook(
+      result, old_addr, old_size, new_size, flags, new_address);
+  return result;
+}
+
+// sbrk cannot be intercepted on Android as there is no mechanism to
+// invoke the original sbrk (since there is no __sbrk as with glibc).
+#if !defined(__BIONIC__)
+// libc's version:
+extern "C" void* __sbrk(ptrdiff_t increment);
+
+extern "C" void* sbrk(ptrdiff_t increment) __THROW {
+  absl::base_internal::MallocHook::InvokePreSbrkHook(increment);
+  void *result = __sbrk(increment);
+  absl::base_internal::MallocHook::InvokeSbrkHook(result, increment);
+  return result;
+}
+#endif  // !defined(__BIONIC__)
+
+namespace absl {
+namespace base_internal {
+
+/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
+                                         int flags, int fd, off_t offset) {
+  void* result;
+  if (!MallocHook::InvokeMmapReplacement(
+          start, length, prot, flags, fd, offset, &result)) {
+    result = do_mmap64(start, length, prot, flags, fd, offset);
+  }
+  return result;
+}
+
+/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
+  int result;
+  if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
+    result = syscall(SYS_munmap, start, length);
+  }
+  return result;
+}
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/per_thread_tls.h b/absl/base/internal/per_thread_tls.h
new file mode 100644
index 000000000..739745103
--- /dev/null
+++ b/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//   ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+//   ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//   ABSL_PER_THREAD_TLS_KEYWORD is empty
+//   ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
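+// (It is declared with __declspec(thread), selected via _MSC_VER below.)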
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS.
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
new file mode 100644
index 000000000..d197d444a
--- /dev/null
+++ b/absl/base/internal/raw_logging.cc
@@ -0,0 +1,225 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/log_severity.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/port.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows. For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc. If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) || \
+    defined(__GENCLAVE__)
+#include <unistd.h>
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if defined(__linux__) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
+// whitelisted set of platforms for which we expect not to be able to raw log.
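+
+// Hooks for customizing the log-line prefix and for handling FATAL aborts;
+// see LogPrefixHook and AbortHook in raw_logging.h for their contracts.
+// When unset, a default prefix is written and a FATAL message ends in a
+// plain abort().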
+ +ABSL_CONST_INIT static absl::base_internal::AtomicHook< + absl::raw_logging_internal::LogPrefixHook> log_prefix_hook; +ABSL_CONST_INIT static absl::base_internal::AtomicHook< + absl::raw_logging_internal::AbortHook> abort_hook; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED +static const char kTruncated[] = " ... (message truncated)\n"; + +// sprintf the format to the buffer, adjusting *buf and *size to reflect the +// consumed bytes, and return whether the message fit without truncation. If +// truncation occurred, if possible leave room in the buffer for the message +// kTruncated[]. +inline static bool VADoRawLog(char** buf, int* size, + const char* format, va_list ap) { + int n = vsnprintf(*buf, *size, format, ap); + bool result = true; + if (n < 0 || n > *size) { + result = false; + if (static_cast(*size) > sizeof(kTruncated)) { + n = *size - sizeof(kTruncated); // room for truncation message + } else { + n = 0; // no room for truncation message + } + } + *size -= n; + *buf += n; + return result; +} +#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED + +static constexpr int kLogBufSize = 3000; + +namespace absl { +namespace raw_logging_internal { +void SafeWriteToStderr(const char *s, size_t len); +} // namespace raw_logging_internal +} // namespace absl + +namespace { + +// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths +// that invoke malloc() and getenv() that might acquire some locks. +// If this becomes a problem we should reimplement a subset of vsnprintf +// that does not need locks and malloc. +// E.g. google3/third_party/clearsilver/core/util/snprintf.c +// looks like such a reimplementation. + +// Helper for RawLog below. +// *DoRawLog writes to *buf of *size and move them past the written portion. +// It returns true iff there was no overflow or error. +bool DoRawLog(char** buf, int* size, const char* format, ...) + ABSL_PRINTF_ATTRIBUTE(3, 4); +bool DoRawLog(char** buf, int* size, const char* format, ...) { + va_list ap; + va_start(ap, format); + int n = vsnprintf(*buf, *size, format, ap); + va_end(ap); + if (n < 0 || n > *size) return false; + *size -= n; + *buf += n; + return true; +} + +void RawLogVA(absl::LogSeverity severity, const char* file, int line, + const char* format, va_list ap) { + char buffer[kLogBufSize]; + char* buf = buffer; + int size = sizeof(buffer); +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + bool enabled = true; +#else + bool enabled = false; +#endif + +#ifdef ABSL_MIN_LOG_LEVEL + if (static_cast(severity) < ABSL_MIN_LOG_LEVEL && + severity < absl::LogSeverity::kFatal) { + enabled = false; + } +#endif + + auto log_prefix_hook_ptr = log_prefix_hook.Load(); + if (log_prefix_hook_ptr) { + enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size); + } else { + if (enabled) { + DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line); + } + } + const char* const prefix_end = buf; + +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + if (enabled) { + bool no_chop = VADoRawLog(&buf, &size, format, ap); + if (no_chop) { + DoRawLog(&buf, &size, "\n"); + } else { + DoRawLog(&buf, &size, "%s", kTruncated); + } + absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer)); + } +#else + static_cast(format); + static_cast(ap); +#endif + + // Abort the process after logging a FATAL message, even if the output itself + // was suppressed. 
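+  // The installed AbortHook (if any) sees the formatted buffer first; if it
+  // returns, abort() still runs unconditionally.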
+ if (severity == absl::LogSeverity::kFatal) { + abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize); + abort(); + } +} + +} // namespace + +namespace absl { +namespace raw_logging_internal { + +// Writes the provided buffer directly to stderr, in a safe, low-level manner. +// +// In POSIX this means calling write(), which is async-signal safe and does +// not malloc. If the platform supports the SYS_write syscall, we invoke that +// directly to side-step any libc interception. +void SafeWriteToStderr(const char *s, size_t len) { +#if defined(ABSL_HAVE_SYSCALL_WRITE) + syscall(SYS_write, STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_POSIX_WRITE) + write(STDERR_FILENO, s, len); +#elif defined(ABSL_HAVE_RAW_IO) + _write(/* stderr */ 2, s, len); +#else + // stderr logging unsupported on this platform + (void) s; + (void) len; +#endif +} + +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) { + va_list ap; + va_start(ap, format); + RawLogVA(severity, file, line, format, ap); + va_end(ap); +} + +bool RawLoggingFullySupported() { +#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED + return true; +#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED + return false; +#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED +} + +} // namespace raw_logging_internal +} // namespace absl diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h new file mode 100644 index 000000000..47cf4373d --- /dev/null +++ b/absl/base/internal/raw_logging.h @@ -0,0 +1,129 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Thread-safe logging routines that do not allocate any memory or +// acquire any locks, and can therefore be used by low-level memory +// allocation, synchronization, and signal-handling code. + +#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ +#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ + +#include "absl/base/internal/log_severity.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" + +// This is similar to LOG(severity) << format..., but +// * it is to be used ONLY by low-level modules that can't use normal LOG() +// * it is designed to be a low-level logger that does not allocate any +// memory and does not need any locks, hence: +// * it logs straight and ONLY to STDERR w/o buffering +// * it uses an explicit printf-format and arguments list +// * it will silently chop off really long message strings +// Usage example: +// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error); +// This will print an almost standard log line like this to stderr only: +// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file +#define ABSL_RAW_LOG(severity, ...) 
\ + do { \ + constexpr const char* absl_raw_logging_internal_basename = \ + ::absl::raw_logging_internal::Basename(__FILE__, \ + sizeof(__FILE__) - 1); \ + ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_basename, \ + __LINE__, __VA_ARGS__); \ + } while (0) + +// Similar to CHECK(condition) << message, but for low-level modules: +// we use only ABSL_RAW_LOG that does not allocate memory. +// We do not want to provide args list here to encourage this usage: +// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); +// so that the args are not computed when not needed. +#define ABSL_RAW_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) + +#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +namespace absl { +namespace raw_logging_internal { + +// Helper function to implement ABSL_RAW_LOG +// Logs format... at "severity" level, reporting it +// as called from file:line. +// This does not allocate memory or acquire locks. +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + +// compile-time function to get the "base" filename, that is, the part of +// a filename after the last "/" or "\" path separator. The search starts at +// the end of the std::string; the second parameter is the length of the std::string. +constexpr const char* Basename(const char* fname, int offset) { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' + ? fname + offset + : Basename(fname, offset - 1); +} + +// For testing only. +// Returns true if raw logging is fully supported. When it is not +// fully supported, no messages will be emitted, but a log at FATAL +// severity will cause an abort. +// +// TODO(gfalcon): Come up with a better name for this method. +bool RawLoggingFullySupported(); + +// Function type for a raw_logging customization hook for suppressing messages +// by severity, and for writing custom prefixes on non-suppressed messages. +// +// The installed hook is called for every raw log invocation. The message will +// be logged to stderr only if the hook returns true. FATAL errors will cause +// the process to abort, even if writing to stderr is suppressed. The hook is +// also provided with an output buffer, where it can write a custom log message +// prefix. +// +// The raw_logging system does not allocate memory or grab locks. User-provided +// hooks must avoid these operations, and must not throw exceptions. +// +// 'severity' is the severity level of the message being written. +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buffer and decrement *buf_size +// accordingly. +using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, + int line, char** buffer, int* buf_size); + +// Function type for a raw_logging customization hook called to abort a process +// when a FATAL message is logged. 
If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The null-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogPrefixHook).
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+}  // namespace raw_logging_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/absl/base/internal/scheduling_mode.h b/absl/base/internal/scheduling_mode.h
new file mode 100644
index 000000000..b7560f30d
--- /dev/null
+++ b/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level //base interfaces such
+// as SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+namespace absl {
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled. Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled. This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type. Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
new file mode 100644
index 000000000..6257bfcec
--- /dev/null
+++ b/absl/base/internal/spinlock.cc
@@ -0,0 +1,243 @@
+// Copyright 2017 The Abseil Authors.
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
new file mode 100644
index 000000000..6257bfcec
--- /dev/null
+++ b/absl/base/internal/spinlock.cc
@@ -0,0 +1,243 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+
+#include "absl/base/casts.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is exclusive from bit [1]. It is used only by a
+//          non-cooperative lock. When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired. May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+//          acquired without contention, however, at least one waiter exists.
+//
+// Otherwise, bits [31..3] represent the time spent by the current lock
+// holder to acquire the lock. There may be outstanding waiter(s).
+
+namespace absl {
+namespace base_internal {
+
+static int adaptive_spin_count = 0;
+
+namespace {
+struct SpinLock_InitHelper {
+  SpinLock_InitHelper() {
+    // On multi-CPU machines, spin for longer before yielding
+    // the processor or sleeping. Reduces idle time significantly.
+    if (base_internal::NumCPUs() > 1) {
+      adaptive_spin_count = 1000;
+    }
+  }
+};
+
+// Hook into global constructor execution:
+// We do not do adaptive spinning before that,
+// but nothing lock-intensive should be going on at that time.
+static SpinLock_InitHelper init_helper;
+
+ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(
+    const void *lock, int64_t wait_cycles)>
+    submit_profile_data;
+
+}  // namespace
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+static inline bool IsCooperative(
+    base_internal::SchedulingMode scheduling_mode) {
+  return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+}
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, 0);
+}
+
+SpinLock::SpinLock(base_internal::LinkerInitialized,
+                   base_internal::SchedulingMode mode) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+  if (IsCooperative(mode)) {
+    InitLinkerInitializedAndCooperative();
+  }
+  // Otherwise, lockword_ is already initialized.
+}
+
+// Static (linker initialized) spinlocks always start life as functional
+// non-cooperative locks.
When their static constructor does run, it will call +// this initializer to augment the lockword with the cooperative bit. By +// actually taking the lock when we do this we avoid the need for an atomic +// operation in the regular unlock path. +// +// SlowLock() must be careful to re-test for this bit so that any outstanding +// waiters may be upgraded to cooperative status. +void SpinLock::InitLinkerInitializedAndCooperative() { + Lock(); + lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed); + Unlock(); +} + +// Monitor the lock to see if its value changes within some time period +// (adaptive_spin_count loop iterations). A timestamp indicating +// when the thread initially started waiting for the lock is passed in via +// the initial_wait_timestamp value. The total wait time in cycles for the +// lock is returned in the wait_cycles parameter. The last value read +// from the lock is returned from the method. +uint32_t SpinLock::SpinLoop(int64_t initial_wait_timestamp, + uint32_t *wait_cycles) { + int c = adaptive_spin_count; + uint32_t lock_value; + do { + lock_value = lockword_.load(std::memory_order_relaxed); + } while ((lock_value & kSpinLockHeld) != 0 && --c > 0); + uint32_t spin_loop_wait_cycles = + EncodeWaitCycles(initial_wait_timestamp, CycleClock::Now()); + *wait_cycles = spin_loop_wait_cycles; + + return TryLockInternal(lock_value, spin_loop_wait_cycles); +} + +void SpinLock::SlowLock() { + // The lock was not obtained initially, so this thread needs to wait for + // it. Record the current timestamp in the local variable wait_start_time + // so the total wait time can be stored in the lockword once this thread + // obtains the lock. + int64_t wait_start_time = CycleClock::Now(); + uint32_t wait_cycles; + uint32_t lock_value = SpinLoop(wait_start_time, &wait_cycles); + + int lock_wait_call_count = 0; + while ((lock_value & kSpinLockHeld) != 0) { + // If the lock is currently held, but not marked as having a sleeper, mark + // it as having a sleeper. + if ((lock_value & kWaitTimeMask) == 0) { + // Here, just "mark" that the thread is going to sleep. Don't store the + // lock wait time in the lock as that will cause the current lock + // owner to think it experienced contention. + if (lockword_.compare_exchange_strong( + lock_value, lock_value | kSpinLockSleeper, + std::memory_order_acquire, std::memory_order_relaxed)) { + // Successfully transitioned to kSpinLockSleeper. Pass + // kSpinLockSleeper to the SpinLockWait routine to properly indicate + // the last lock_value observed. + lock_value |= kSpinLockSleeper; + } else if ((lock_value & kSpinLockHeld) == 0) { + // Lock is free again, so try and acquire it before sleeping. The + // new lock state will be the number of cycles this thread waited if + // this thread obtains the lock. + lock_value = TryLockInternal(lock_value, wait_cycles); + continue; // Skip the delay at the end of the loop. + } + } + + base_internal::SchedulingMode scheduling_mode; + if ((lock_value & kSpinLockCooperative) != 0) { + scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } else { + scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; + } + // SpinLockDelay() calls into fiber scheduler, we need to see + // synchronization there to avoid false positives. + ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); + // Wait for an OS specific delay. 
+    base_internal::SpinLockDelay(&lockword_, lock_value,
+                                 ++lock_wait_call_count, scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop(wait_start_time, &wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info. We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock. This is reported by contentionz profiling. Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors, we divide to reduce the granularity to
+// 2^PROFILE_TIMESTAMP_SHIFT-sized units. On a 4GHz machine this will lose
+// track of wait times greater than (2^29/4 GHz)*128 =~ 17.2 seconds. Such
+// waits should be extremely rare.
+enum { PROFILE_TIMESTAMP_SHIFT = 7 };
+enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits. bit_cast is required as Atomic32 is signed.
+  const uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+
+  // Bump up the value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t after_spinlock_sleeper =
+      kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+  return clamped == kSpinLockSleeper ? after_spinlock_sleeper : clamped;
+}
+
+uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const uint64_t scaled_wait_time =
+      static_cast<uint64_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time
+      << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+}
+
+}  // namespace base_internal
+}  // namespace absl
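// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Worked example of the wait-cycle
// encoding above, using the constants defined in this file
// (PROFILE_TIMESTAMP_SHIFT = 7, LOCKWORD_RESERVED_SHIFT = 3):
//
//   A thread waits 1,000,000 cycles for the lock.
//     scaled  = 1,000,000 >> 7 = 7,812     (128-cycle granularity)
//     clamped = 7,812 << 3     = 62,496    (low 3 bits left free for flags)
//   DecodeWaitCycles(62,496) = (62,496 & kWaitTimeMask) << (7 - 3)
//                            = 999,936 cycles,
//   i.e. the original wait time rounded down to a multiple of 128 cycles.
// ---------------------------------------------------------------------------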
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
new file mode 100644
index 000000000..fa64ba65b
--- /dev/null
+++ b/absl/base/internal/spinlock.h
@@ -0,0 +1,227 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
+// SpinLock is provided for use in three situations:
+//  - for use in code that Mutex itself depends on
+//  - to get a faster fast-path release under low contention (without an
+//    atomic read-modify-write). In return, SpinLock has worse behavior under
+//    contention, which is why Mutex is preferred in most situations.
+//  - for async signal safety (see below)
+
+// SpinLock is async signal safe. If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while they are holding the lock. Typically, this is done by blocking
+// the signal.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+namespace base_internal {
+
+class LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, 0);
+  }
+
+  // Special constructor for use with static SpinLock objects. E.g.,
+  //
+  //    static SpinLock lock(base_internal::kLinkerInitialized);
+  //
+  // When initialized using this constructor, we depend on the fact
+  // that the linker has already initialized the memory appropriately.
+  // A SpinLock constructed like this can be freely used from global
+  // initializers without worrying about the order in which global
+  // initializers run.
+  explicit SpinLock(base_internal::LinkerInitialized) {
+    // Does nothing; lockword_ is already initialized.
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers. Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+  SpinLock(base_internal::LinkerInitialized,
+           base_internal::SchedulingMode mode);
+
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }
+
+  // Acquire this SpinLock.
+  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful. If the lock was not acquired, false is
+  // returned. If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lockword_.store(lock_value & kSpinLockCooperative,
+                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+  // Determine if the lock is held. When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above. Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether a lock disables scheduling.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  enum { kSpinLockHeld = 1 };
+  enum { kSpinLockCooperative = 2 };
+  enum { kSpinLockDisabledScheduling = 4 };
+  enum { kSpinLockSleeper = 8 };
+  enum { kWaitTimeMask =  // Includes kSpinLockSleeper.
+         ~(kSpinLockHeld | kSpinLockCooperative |
+           kSpinLockDisabledScheduling) };
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void InitLinkerInitializedAndCooperative();
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles);
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended. The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles. This is thread-safe, but only a single
+// profiler can be registered. It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    // The CAS failed, so we did not take the lock; if we disabled
+    // rescheduling above, re-enable it now.
+    base_internal::SchedulingGuard::EnableRescheduling(
+        sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
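// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Typical use of the classes above.
// The counter variable and function are hypothetical.
//
//   #include "absl/base/internal/spinlock.h"
//
//   namespace {
//   absl::base_internal::SpinLock counter_lock(
//       absl::base_internal::kLinkerInitialized);  // safe in static storage
//   int64_t counter GUARDED_BY(counter_lock) = 0;
//   }  // namespace
//
//   int64_t IncrementCounter() {
//     // RAII acquisition; Unlock() runs at scope exit, even on early return.
//     absl::base_internal::SpinLockHolder l(&counter_lock);
//     return ++counter;
//   }
// ---------------------------------------------------------------------------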
diff --git a/absl/base/internal/spinlock_posix.inc b/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 000000000..0098c1c76
--- /dev/null
+++ b/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+#include <time.h>
+#include <cerrno>
+#include <cstdint>
+
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  int save_errno = errno;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+  errno = save_errno;
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 000000000..0fd362866
--- /dev/null
+++ b/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,77 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// base::subtle::SpinLockDelay() and base::subtle::SpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  for (int loop = 0; ; loop++) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||  // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  r <<= 16;  // 48-bit random number now in top 48-bits.
+  if (loop < 0 || loop > 32) {  // limit loop to 0..32
+    loop = 32;
+  }
+  // loop>>3 cannot exceed 4 because loop cannot exceed 32.
+  // Select top 20..24 bits of lower 48 bits,
+  // giving approximately 0ms to 16ms.
+  // Mean is exponential in loop for first 32 iterations, then 8ms.
+  // The futex path multiplies this by 16, since we expect explicit wakeups
+  // almost always on that path.
+  return r >> (44 - (loop >> 3));
+}
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
new file mode 100644
index 000000000..5432c1ce2
--- /dev/null
+++ b/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+// This file is used internally in spinlock.cc and once.cc, and a few other
+// places listed in //base:spinlock_wait_users. If you need to use it outside
+// of //base, please request permission to be added to that list.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to". It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make that transition, and
+// then return the old value of *w. Waits as long as necessary.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
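// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] A tiny binary lock built on the
// transition-table API above, to make the semantics concrete. The constants
// and function are illustrative only.
//
//   constexpr uint32_t kFree = 0;
//   constexpr uint32_t kHeld = 1;
//
//   // Blocks until *word transitions kFree -> kHeld on our behalf.
//   void AcquireWord(std::atomic<uint32_t>* word) {
//     static const absl::base_internal::SpinLockWaitTransition trans[] = {
//         {kFree, kHeld, true},  // done==true: return once this succeeds
//     };
//     absl::base_internal::SpinLockWait(
//         word, 1, trans, absl::base_internal::SCHEDULE_KERNEL_ONLY);
//   }
// ---------------------------------------------------------------------------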
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If
+// "all" is true, wake all such threads. This call is a hint, and on some
+// systems it may be a no-op; threads calling SpinLockDelay() will always wake
+// eventually even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
+// In all cases, it must return in bounded time even if SpinLockWake() is not
+// called.
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
+void AbslInternalSpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  AbslInternalSpinLockWake(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    base_internal::SchedulingMode scheduling_mode) {
+  AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 000000000..32c8fc0bb
--- /dev/null
+++ b/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
+                               uint32_t /* value */, int loop,
+                               absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+  }
+}
+
+void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
+                              bool /* all */) {}
+
+}  // extern "C"
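// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] The backoff both platform files
// consume comes from SpinLockSuggestedDelayNS() in spinlock_wait.cc above:
//
//   return r >> (44 - (loop >> 3));   // r holds 48 random top bits
//
//   loop 2..7   -> shift 44 -> delay in [0, 2^20) ns, i.e. up to ~1 ms
//   loop 8..15  -> shift 43 -> up to ~2 ms
//   ...roughly doubling every 8 iterations...
//   loop >= 32  -> shift 40 -> up to ~16.8 ms (mean ~8 ms),
//
// which matches the "approximately 0ms to 16ms" comment in that function.
// ---------------------------------------------------------------------------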
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
new file mode 100644
index 000000000..11863eab2
--- /dev/null
+++ b/absl/base/internal/sysinfo.cc
@@ -0,0 +1,370 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifdef _WIN32
+#include <shlwapi.h>
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#ifdef __APPLE__
+#include <sys/sysctl.h>
+#endif
+
+#include <string.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+namespace base_internal {
+
+static once_flag init_system_info_once;
+static int num_cpus = 0;
+static double nominal_cpu_frequency = 1.0;  // 0.0 might be dangerous.
+
+static int GetNumCPUs() {
+#if defined(__myriad2__) || defined(__GENCLAVE__)
+  // TODO(b/28296132): Calling std::thread::hardware_concurrency() induces a
+  // link error on myriad2 builds.
+  // TODO(b/62709537): Support std::thread::hardware_concurrency() in gEnclave.
+  return 1;
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return std::thread::hardware_concurrency();
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+  DWORD data;
+  DWORD data_size = sizeof(data);
+  #pragma comment(lib, "shlwapi.lib")  // For SHGetValue().
+  if (SUCCEEDED(
+          SHGetValueA(HKEY_LOCAL_MACHINE,
+                      "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
+                      "~MHz", nullptr, &data, &data_size))) {
+    return data * 1e6;  // Value is MHz.
+  }
+  return 1.0;
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+  int fd = open(file, O_RDONLY);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    int len = read(fd, line, sizeof(line) - 1);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() { + struct timespec t; +#ifdef CLOCK_MONOTONIC_RAW + int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t); +#else + int rc = clock_gettime(CLOCK_MONOTONIC, &t); +#endif + if (rc != 0) { + perror("clock_gettime() failed"); + abort(); + } + return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec; +} + +class UnscaledCycleClockWrapperForInitializeFrequency { + public: + static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); } +}; + +struct TimeTscPair { + int64_t time; // From ReadMonotonicClockNanos(). + int64_t tsc; // From UnscaledCycleClock::Now(). +}; + +// Returns a pair of values (monotonic kernel time, TSC ticks) that +// approximately correspond to each other. This is accomplished by +// doing several reads and picking the reading with the lowest +// latency. This approach is used to minimize the probability that +// our thread was preempted between clock reads. +static TimeTscPair GetTimeTscPair() { + int64_t best_latency = std::numeric_limits::max(); + TimeTscPair best; + for (int i = 0; i < 10; ++i) { + int64_t t0 = ReadMonotonicClockNanos(); + int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now(); + int64_t t1 = ReadMonotonicClockNanos(); + int64_t latency = t1 - t0; + if (latency < best_latency) { + best_latency = latency; + best.time = t0; + best.tsc = tsc; + } + } + return best; +} + +// Measures and returns the TSC frequency by taking a pair of +// measurements approximately `sleep_nanoseconds` apart. +static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) { + auto t0 = GetTimeTscPair(); + struct timespec ts; + ts.tv_sec = 0; + ts.tv_nsec = sleep_nanoseconds; + while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {} + auto t1 = GetTimeTscPair(); + double elapsed_ticks = t1.tsc - t0.tsc; + double elapsed_time = (t1.time - t0.time) * 1e-9; + return elapsed_ticks / elapsed_time; +} + +// Measures and returns the TSC frequency by calling +// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the +// frequency measurement stabilizes. +static double MeasureTscFrequency() { + double last_measurement = -1.0; + int sleep_nanoseconds = 1000000; // 1 millisecond. + for (int i = 0; i < 8; ++i) { + double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds); + if (measurement * 0.99 < last_measurement && + last_measurement < measurement * 1.01) { + // Use the current measurement if it is within 1% of the + // previous measurement. + return measurement; + } + last_measurement = measurement; + sleep_nanoseconds *= 2; + } + return last_measurement; +} + +#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY + +static double GetNominalCPUFrequency() { + long freq = 0; + + // Google's production kernel has a patch to export the TSC + // frequency through sysfs. If the kernel is exporting the TSC + // frequency use that. There are issues where cpuinfo_max_freq + // cannot be relied on because the BIOS may be exporting an invalid + // p-state (on x86) or p-states may be used to put the processor in + // a new mode (turbo mode). Essentially, those frequencies cannot + // always be relied upon. The same reasons apply to /proc/cpuinfo as + // well. + if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) { + return freq * 1e3; // Value is kHz. + } + +#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) + // On these platforms, the TSC frequency is the nominal CPU + // frequency. 
+  // But without having the kernel export it directly through
+  // /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no other way to
+  // reliably get the TSC frequency, so we have to measure it ourselves.
+  // Some CPUs abuse cpuinfo_max_freq by exporting "fake" frequencies for
+  // implementing new features. For example, Intel's turbo mode is enabled by
+  // exposing a p-state value with a higher frequency than that of the real
+  // TSC rate. Because of this, we prefer to measure the TSC rate ourselves
+  // on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum* frequency,
+  // not whatever CPU speed some random processor happens to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+// InitializeSystemInfo() may be called before main() and before
+// malloc is properly initialized, therefore this must not allocate
+// memory.
+static void InitializeSystemInfo() {
+  num_cpus = GetNumCPUs();
+  nominal_cpu_frequency = GetNominalCPUFrequency();
+}
+
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(&init_system_info_once,
+                                  InitializeSystemInfo);
+  return num_cpus;
+}
+
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(&init_system_info_once,
+                                  InitializeSystemInfo);
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return GetCurrentThreadId();
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return syscall(SYS_gettid);
+}
+
+#else
+
+// Fallback implementation of GetTID using pthread_getspecific.
+static once_flag tid_once;
+static pthread_key_t tid_key;
+static absl::base_internal::SpinLock tid_lock(
+    absl::base_internal::kLinkerInitialized);
+
+// We set a bit per thread in this array to indicate that an ID is in
+// use. ID 0 is unused because it is the default value returned by
+// pthread_getspecific().
+static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
+static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
+
+// Returns the TID to tid_array.
+static void FreeTID(void *v) {
+  intptr_t tid = reinterpret_cast<intptr_t>(v);
+  int word = tid / kBitsPerWord;
+  uint32_t mask = ~(1u << (tid % kBitsPerWord));
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
+  (*tid_array)[word] &= mask;
+}
+
+static void InitGetTID() {
+  if (pthread_key_create(&tid_key, FreeTID) != 0) {
+    // The logging system calls GetTID() so it can't be used here.
+    perror("pthread_key_create failed");
+    abort();
+  }
+
+  // Initialize tid_array.
+  absl::base_internal::SpinLockHolder lock(&tid_lock);
+  tid_array = new std::vector<uint32_t>(1);
+  (*tid_array)[0] = 1;  // ID 0 is never-allocated.
+}
+// Return a per-thread small integer ID from pthread's thread-specific data.
+pid_t GetTID() {
+  absl::call_once(tid_once, InitGetTID);
+
+  intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  if (tid != 0) {
+    return tid;
+  }
+
+  int bit;  // tid_array[word] = 1u << bit;
+  size_t word;
+  {
+    // Search for the first unused ID.
+    absl::base_internal::SpinLockHolder lock(&tid_lock);
+    // First search for a word in the array that is not all ones.
+    word = 0;
+    while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
+      ++word;
+    }
+    if (word == tid_array->size()) {
+      tid_array->push_back(0);  // No space left, add kBitsPerWord more IDs.
+    }
+    // Search for a zero bit in the word.
+    bit = 0;
+    while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
+      ++bit;
+    }
+    tid = (word * kBitsPerWord) + bit;
+    (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
+  }
+
+  if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
+    perror("pthread_setspecific failed");
+    abort();
+  }
+
+  return static_cast<pid_t>(tid);
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h
new file mode 100644
index 000000000..f21de1432
--- /dev/null
+++ b/absl/base/internal/sysinfo.h
@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on. It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#else
+#include <windows.h>
+#endif
+
+#include "absl/base/port.h"
+
+namespace absl {
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor. This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in the system. See
+// //base/cpuid/cpuid.h for more CPU-related info. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused. Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill(). However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids share a single type: the return
+// types of GetProcessId() and GetThreadId() are both DWORD.
+using pid_t = DWORD;
+#endif
+pid_t GetTID();
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
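// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Exercising the three accessors
// declared above; the surrounding function is hypothetical.
//
//   #include <cstdio>
//   #include "absl/base/internal/sysinfo.h"
//
//   void LogHostInfo() {
//     // Both calls lazily run InitializeSystemInfo() exactly once.
//     const int cpus = absl::base_internal::NumCPUs();
//     const double hz = absl::base_internal::NominalCPUFrequency();
//     const pid_t tid = absl::base_internal::GetTID();
//     printf("%d CPUs @ %.0f Hz, tid=%d\n", cpus, hz, static_cast<int>(tid));
//   }
// ---------------------------------------------------------------------------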
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
new file mode 100644
index 000000000..4c7d66b7f
--- /dev/null
+++ b/absl/base/internal/sysinfo_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, NominalCPUFrequency) {
+#if !(defined(__aarch64__) && defined(__linux__))
+  EXPECT_GE(NominalCPUFrequency(), 1000.0)
+      << "NominalCPUFrequency() did not return a reasonable value";
+#else
+  // TODO(b/37919252): Aarch64 cannot read the CPU frequency from sysfs, so we
+  // get back 1.0. Fix once the value is available.
+  EXPECT_EQ(NominalCPUFrequency(), 1.0)
+      << "CPU frequency detection was fixed! Please update unittest and "
+         "b/37919252";
+#endif
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 32; ++i) {
+    constexpr int kNumThreads = 64;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here. The threads need to
+        // be alive, otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the
+  // API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
new file mode 100644
index 000000000..ee96a5883
--- /dev/null
+++ b/absl/base/internal/thread_identity.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to coordinate one-time creation of our pthread_key.
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple copies of //base exist in a
+// process (via dlopen() or similar), references to
+// thread_identity_ptr from each copy of the code will refer to
+// *different* instances of this ptr. See extensive discussion of this choice
+// in cl/90634708.
+// TODO(ahh): hard deprecate multiple copies of //base; remove this.
+#ifdef __GNUC__
+__attribute__((visibility("protected")))
+#endif  // __GNUC__
+    ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(
+    ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe. But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  // b/18366710:
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal
+  // handler is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe. But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific is expected to clear the value on destruction.
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000..914d5da7e
--- /dev/null
+++ b/absl/base/internal/thread_identity.h
@@ -0,0 +1,240 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// various low-level interfaces. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/per_thread_tls.h"
+
+namespace absl {
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of base::Mutex and base::CondVar.
+struct PerThreadSynch {
+  // The internal representation of base::Mutex and base::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator. Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+
+  // The wait parameters of the current wait. waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch. A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams *waitp;
+
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face of
+                               // broken invariants. This is used within fatal
+                               // signal handlers to improve the chances of
+                               // debug logging information being output
+                               // successfully.
+
+  intptr_t readers;  // Number of readers in mutex.
+  int priority;      // Priority of thread (updated every so often).
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake. Used for an optimization
+                         // in Enqueue(). true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully
+                         // releases the lock. It may not be set to false
+                         // by a reader that decrements the count to
+                         // non-zero. Protected by mutex spinlock.
+
+  bool wake;  // This thread is to be woken from a Mutex.
+
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
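// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Why kLowZeroBits matters: with
// PerThreadSynch 256-byte aligned (kAlignment = 1 << 8), the low 8 bits of
// its address are zero, so Mutex/CondVar can pack state flags into those bits
// and still recover the pointer. Schematically (illustrative code only):
//
//   constexpr uintptr_t kFlagMask = (1 << PerThreadSynch::kLowZeroBits) - 1;
//
//   uintptr_t Pack(PerThreadSynch* s, uintptr_t flags) {
//     return reinterpret_cast<uintptr_t>(s) | (flags & kFlagMask);
//   }
//   PerThreadSynch* UnpackPtr(uintptr_t v) {
//     return reinterpret_cast<PerThreadSynch*>(v & ~kFlagMask);
//   }
// ---------------------------------------------------------------------------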
+struct ThreadIdentity {
+  // Must be the first member. The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned. We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself. The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread;
+// guaranteed to be unique for its lifetime. The returned object will remain
+// valid for the program's lifetime, although it may be re-assigned to a
+// subsequent thread. If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value. 'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and
+// only from that function.
+void ClearCurrentThreadIdentity();
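// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] Shape of the lifecycle contract
// above: the reclaimer runs at thread exit and must detach the identity
// before its storage is recycled. ReturnIdentityToFreeList() is hypothetical;
// the real counterpart lives in the synchronization internals.
//
//   void ReclaimThreadIdentity(void* v) {
//     auto* identity = static_cast<absl::base_internal::ThreadIdentity*>(v);
//     // Per the contract above, only legal from inside a reclaimer.
//     absl::base_internal::ClearCurrentThreadIdentity();
//     ReturnIdentityToFreeList(identity);  // identities are never freed
//   }
//
//   // At first use on a thread:
//   //   SetCurrentThreadIdentity(identity, &ReclaimThreadIdentity);
// ---------------------------------------------------------------------------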
diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc
new file mode 100644
index 000000000..a2b053d96
--- /dev/null
+++ b/absl/base/internal/thread_identity_test.cc
@@ -0,0 +1,124 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace base_internal {
+namespace {
+
+// protects num_identities_reused
+static absl::base_internal::SpinLock map_lock(
+    absl::base_internal::kLinkerInitialized);
+static int num_identities_reused;
+
+static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
+
+static void TestThreadIdentityCurrent(const void* assert_no_identity) {
+  ThreadIdentity* identity;
+
+  // We have to test this conditionally, because if the test framework relies
+  // on Abseil, then some previous action may have already allocated an
+  // identity.
+  if (assert_no_identity == kCheckNoIdentity) {
+    identity = CurrentThreadIdentityIfPresent();
+    EXPECT_TRUE(identity == nullptr);
+  }
+
+  identity = synchronization_internal::GetOrCreateCurrentThreadIdentity();
+  EXPECT_TRUE(identity != nullptr);
+  ThreadIdentity* identity_no_init;
+  identity_no_init = CurrentThreadIdentityIfPresent();
+  EXPECT_TRUE(identity == identity_no_init);
+
+  // Check that per_thread_synch is correctly aligned.
+  EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) %
+                   PerThreadSynch::kAlignment);
+  EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
+
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  num_identities_reused++;
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorks) {
+  // This tests for the main() thread.
+  TestThreadIdentityCurrent(nullptr);
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
+  // Now try the same basic test with multiple threads being created and
+  // destroyed.  This makes sure that:
+  // - New threads are created without a ThreadIdentity.
+  // - We re-allocate ThreadIdentity objects from the free-list.
+  // - If a thread implementation chooses to recycle threads, correct
+  //   re-initialization occurs.
+  static const int kNumLoops = 3;
+  static const int kNumThreads = 400;
+  for (int iter = 0; iter < kNumLoops; iter++) {
+    std::vector<std::thread> threads;
+    for (int i = 0; i < kNumThreads; ++i) {
+      threads.push_back(
+          std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+
+  // We should have recycled ThreadIdentity objects above; while (external)
+  // library threads allocating their own identities may preclude some
+  // reuse, we should have sufficient repetitions to exclude this.
+  EXPECT_LT(kNumThreads, num_identities_reused);
+}
+
+TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
+  // This test repeatedly creates and joins a series of threads, each of
+  // which acquires and releases shared Mutex locks.  This verifies that
+  // Mutex operations work correctly under a reused ThreadIdentity.
+  // Note that the most likely failure mode of this test is a crash or
+  // deadlock.
+  static const int kNumLoops = 10;
+  static const int kNumThreads = 12;
+  static const int kNumMutexes = 3;
+  static const int kNumLockLoops = 5;
+
+  Mutex mutexes[kNumMutexes];
+  for (int iter = 0; iter < kNumLoops; ++iter) {
+    std::vector<std::thread> threads;
+    for (int thread = 0; thread < kNumThreads; ++thread) {
+      threads.push_back(std::thread([&]() {
+        for (int l = 0; l < kNumLockLoops; ++l) {
+          for (int m = 0; m < kNumMutexes; ++m) {
+            MutexLock lock(&mutexes[m]);
+          }
+        }
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
+ +#include "absl/base/internal/throw_delegate.h" + +#include +#include +#include +#include +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace absl { +namespace base_internal { + +namespace { +template +[[noreturn]] void Throw(const T& error) { +#ifdef ABSL_HAVE_EXCEPTIONS + throw error; +#else + ABSL_RAW_LOG(ERROR, "%s", error.what()); + abort(); +#endif +} +} // namespace + +void ThrowStdLogicError(const std::string& what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdLogicError(const char* what_arg) { + Throw(std::logic_error(what_arg)); +} +void ThrowStdInvalidArgument(const std::string& what_arg) { + Throw(std::invalid_argument(what_arg)); +} +void ThrowStdInvalidArgument(const char* what_arg) { + Throw(std::invalid_argument(what_arg)); +} + +void ThrowStdDomainError(const std::string& what_arg) { + Throw(std::domain_error(what_arg)); +} +void ThrowStdDomainError(const char* what_arg) { + Throw(std::domain_error(what_arg)); +} + +void ThrowStdLengthError(const std::string& what_arg) { + Throw(std::length_error(what_arg)); +} +void ThrowStdLengthError(const char* what_arg) { + Throw(std::length_error(what_arg)); +} + +void ThrowStdOutOfRange(const std::string& what_arg) { + Throw(std::out_of_range(what_arg)); +} +void ThrowStdOutOfRange(const char* what_arg) { + Throw(std::out_of_range(what_arg)); +} + +void ThrowStdRuntimeError(const std::string& what_arg) { + Throw(std::runtime_error(what_arg)); +} +void ThrowStdRuntimeError(const char* what_arg) { + Throw(std::runtime_error(what_arg)); +} + +void ThrowStdRangeError(const std::string& what_arg) { + Throw(std::range_error(what_arg)); +} +void ThrowStdRangeError(const char* what_arg) { + Throw(std::range_error(what_arg)); +} + +void ThrowStdOverflowError(const std::string& what_arg) { + Throw(std::overflow_error(what_arg)); +} +void ThrowStdOverflowError(const char* what_arg) { + Throw(std::overflow_error(what_arg)); +} + +void ThrowStdUnderflowError(const std::string& what_arg) { + Throw(std::underflow_error(what_arg)); +} +void ThrowStdUnderflowError(const char* what_arg) { + Throw(std::underflow_error(what_arg)); +} + +void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } + +void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } + +} // namespace base_internal +} // namespace absl diff --git a/absl/base/internal/throw_delegate.h b/absl/base/internal/throw_delegate.h new file mode 100644 index 000000000..70e2d7709 --- /dev/null +++ b/absl/base/internal/throw_delegate.h @@ -0,0 +1,71 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include + +namespace absl { +namespace base_internal { + +// Helper functions that allow throwing exceptions consistently from anywhere. +// The main use case is for header-based libraries (eg templates), as they will +// be built by many different targets with their own compiler options. 
diff --git a/absl/base/internal/throw_delegate.h b/absl/base/internal/throw_delegate.h
new file mode 100644
index 000000000..70e2d7709
--- /dev/null
+++ b/absl/base/internal/throw_delegate.h
@@ -0,0 +1,71 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
+namespace absl {
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g. templates), as they
+// will be built by many different targets with their own compiler options.
+// In particular, this allows a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions.  This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/absl/base/internal/tsan_mutex_interface.h b/absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 000000000..a1303e67c
--- /dev/null
+++ b/absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for the meaning of these annotations.
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+#ifdef THREAD_SANITIZER
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
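A minimal sketch of how a custom lock might use these hooks (`ToySpinLock` is illustrative, not Abseil code; a flags value of 0 is assumed to mean default mutex behavior, per the sanitizer interface). When ThreadSanitizer is absent the annotations compile away to nothing:

#include <atomic>
#include "absl/base/internal/tsan_mutex_interface.h"

class ToySpinLock {
 public:
  ToySpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
  ~ToySpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

  void Lock() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    while (locked_.exchange(true, std::memory_order_acquire)) {
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);  // third argument: recursion level
  }

  void Unlock() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    locked_.store(false, std::memory_order_release);
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};

The pre/post bracketing lets TSAN model the lock's happens-before edges even though it never sees a pthread call.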
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
new file mode 100644
index 000000000..ea30829b0
--- /dev/null
+++ b/absl/base/internal/unaligned_access.h
@@ -0,0 +1,256 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+// On some platforms, like ARM, the copy functions can be more efficient
+// than a load and a store.
+//
+// It is possible to implement all of these using constant-length memcpy
+// calls, which is portable and will usually be inlined into simple loads and
+// stores if the architecture supports it.  However, such inlining usually
+// happens in a pass that's quite late in compilation, which means the
+// resulting loads and stores cannot participate in many other optimizations,
+// leading to overall worse code.
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
+// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
+// will miss a bug if 08 is the first unaddressable byte.
+// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
+// miss a race between this access and some other accesses to 08.
+// MemorySanitizer will correctly propagate the shadow on unaligned stores
+// and correctly report bugs on unaligned loads, but it may not properly
+// update and report the origin of the uninitialized memory.
+// For all three tools, replacing an unaligned access with a tool-specific
+// callback solves the problem.
+
+// Make sure uint16_t/uint32_t/uint64_t are defined.
+#include <stdint.h>
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(const void *p);
+uint32_t __sanitizer_unaligned_load32(const void *p);
+uint64_t __sanitizer_unaligned_load64(const void *p);
+void __sanitizer_unaligned_store16(void *p, uint16_t v);
+void __sanitizer_unaligned_store32(void *p, uint32_t v);
+void __sanitizer_unaligned_store64(void *p, uint64_t v);
+}  // extern "C"
+
+namespace absl {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  return __sanitizer_unaligned_load16(p);
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  return __sanitizer_unaligned_load32(p);
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  return __sanitizer_unaligned_load64(p);
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) {
+  __sanitizer_unaligned_store16(p, v);
+}
+
+inline void UnalignedStore32(void *p, uint32_t v) {
+  __sanitizer_unaligned_store32(p, v);
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) {
+  __sanitizer_unaligned_store64(p, v);
+}
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \
+    defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) ||    \
+    defined(__ppc64__) || defined(__PPC64__)
+
+// x86 and x86-64 can perform unaligned loads/stores directly;
+// modern PowerPC hardware can also do unaligned integer loads and stores;
+// but note: the FPU still sends unaligned loads and stores to a trap handler!
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (*reinterpret_cast<const uint16_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (*reinterpret_cast<const uint32_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (*reinterpret_cast<const uint64_t *>(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (*reinterpret_cast<uint16_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (*reinterpret_cast<uint32_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (*reinterpret_cast<uint64_t *>(_p) = (_val))
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode).  There's no simple #define that says just
+// "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
+// sub-architectures.  Newer gcc (>= 4.6) sets an __ARM_FEATURE_ALIGNED
+// #define, so in time, maybe we can move on to that.
+//
+// This is a mess, but there's not much we can do about it.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads).  Unless
+// we explicitly tell the compiler that these accesses can be unaligned, it
+// can and will combine accesses.  On armcc, the way to signal this is by
+// accessing through the type (uint32_t __packed *), but GCC has no such
+// attribute (it ignores __attribute__((packed)) on individual variables).
+// However, we can tell it that a _struct_ is unaligned, which has the same
+// effect, so we do that.
+
+namespace absl {
+namespace internal {
+
+struct Unaligned16Struct {
+  uint16_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+struct Unaligned32Struct {
+  uint32_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+}  // namespace internal
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned16Struct *>(_p))->value)
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned32Struct *>(_p))->value)
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val)                          \
+  ((reinterpret_cast< ::absl::internal::Unaligned16Struct *>(_p))->value = \
+       (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val)                          \
+  ((reinterpret_cast< ::absl::internal::Unaligned32Struct *>(_p))->value = \
+       (_val))
+
+namespace absl {
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#else
+
+// ABSL_INTERNAL_NEED_ALIGNED_LOADS is defined when the underlying platform
+// doesn't support unaligned access.
+#define ABSL_INTERNAL_NEED_ALIGNED_LOADS
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+namespace absl {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#endif
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
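Usage sketch (the buffer layout is illustrative): reading a 32-bit value at an odd offset through the macro instead of dereferencing a cast pointer, which would be undefined behavior on strict-alignment platforms:

#include <cstdint>
#include "absl/base/internal/unaligned_access.h"

// Parse a u32 that sits at byte offset 1, e.g. right after a one-byte tag.
uint32_t ReadPayloadLength(const char* buf) {
  // buf + 1 may be misaligned; the macro picks the safe strategy per platform
  // (direct load, packed-struct load, memcpy, or a sanitizer callback).
  return ABSL_INTERNAL_UNALIGNED_LOAD32(buf + 1);
}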
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
new file mode 100644
index 000000000..a12d68bd1
--- /dev/null
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#include <sys/platform/ppc.h>
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t ret;
+  __asm__ volatile("rdtsc" : "=A"(ret));
+  return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return (high << 32) | low;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+  return __ppc_get_timebase();
+}
+
+double UnscaledCycleClock::Frequency() {
+  return __ppc_get_timebase_freq();
+}
+
+#elif defined(__aarch64__)
+
+// System timer of ARMv8 runs at a different frequency than the CPU's.
+// The frequency is fixed, typically in the range 1-50MHz.  It can be
+// read at the CNTFRQ special register.  We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  uint64_t aarch64_timer_frequency;
+  asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+  return aarch64_timer_frequency;
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() {
+  return __rdtsc();
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
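The two primitives are meant to be used together: a cycle delta divided by `Frequency()` yields elapsed seconds. Since both members are private (whitelisted friends only), this sketch models the arithmetic with plain parameters instead of calling the class directly:

#include <cstdint>

// elapsed = (end - start) / cycles_per_second, where the readings would come
// from UnscaledCycleClock::Now() and the rate from
// UnscaledCycleClock::Frequency(), via a whitelisted caller such as CycleClock.
double ElapsedSeconds(int64_t start_cycles, int64_t end_cycles,
                      double cycles_per_second) {
  return static_cast<double>(end_cycles - start_cycles) / cycles_per_second;
}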
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 000000000..ddf6a5e5a
--- /dev/null
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,118 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal / whitelisted use only; you should consider
+//    using CycleClock instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.  If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) ||                         \
+    defined(_M_IX86) || defined(_M_X64)
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter.  The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz.  We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || TARGET_OS_IPHONE || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace absl {
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Returns how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a
+  // measured value.
+  static double Frequency();
+
+  // Whitelisted friends.
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+}  // namespace base_internal
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
args) & { + return StrCat("&", args...); + } + template + std::string operator()(const Args&... args) const& { + return StrCat("const&", args...); + } + template + std::string operator()(const Args&... args) && { + return StrCat("&&", args...); + } +}; + +struct Class { + int Method(int a, int b) { return a - b; } + int ConstMethod(int a, int b) const { return a - b; } + + int member; +}; + +struct FlipFlop { + int ConstMethod() const { return member; } + FlipFlop operator*() const { return {-member}; } + + int member; +}; + +// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending +// on which one is valid. +template +decltype(Invoke(std::declval())) CallMaybeWithArg(const F& f) { + return Invoke(f); +} + +template +decltype(Invoke(std::declval(), 42)) CallMaybeWithArg(const F& f) { + return Invoke(f, 42); +} + +TEST(InvokeTest, Function) { + EXPECT_EQ(1, Invoke(Function, 3, 2)); + EXPECT_EQ(1, Invoke(&Function, 3, 2)); +} + +TEST(InvokeTest, NonCopyableArgument) { + EXPECT_EQ(42, Invoke(Sink, make_unique(42))); +} + +TEST(InvokeTest, NonCopyableResult) { + EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42)); +} + +TEST(InvokeTest, VoidResult) { + Invoke(NoOp); +} + +TEST(InvokeTest, ConstFunctor) { + EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2)); +} + +TEST(InvokeTest, MutableFunctor) { + MutableFunctor f; + EXPECT_EQ(1, Invoke(f, 3, 2)); + EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2)); +} + +TEST(InvokeTest, EphemeralFunctor) { + EphemeralFunctor f; + EXPECT_EQ(1, Invoke(std::move(f), 3, 2)); + EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2)); +} + +TEST(InvokeTest, OverloadedFunctor) { + OverloadedFunctor f; + const OverloadedFunctor& cf = f; + + EXPECT_EQ("&", Invoke(f)); + EXPECT_EQ("& 42", Invoke(f, " 42")); + + EXPECT_EQ("const&", Invoke(cf)); + EXPECT_EQ("const& 42", Invoke(cf, " 42")); + + EXPECT_EQ("&&", Invoke(std::move(f))); + EXPECT_EQ("&& 42", Invoke(std::move(f), " 42")); +} + +TEST(InvokeTest, ReferenceWrapper) { + ConstFunctor cf; + MutableFunctor mf; + EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2)); + EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2)); +} + +TEST(InvokeTest, MemberFunction) { + std::unique_ptr p(new Class); + std::unique_ptr cp(new Class); + EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2)); + + EXPECT_EQ(1, Invoke(&Class::Method, make_unique(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique(), 3, 2)); + EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique(), 3, 2)); +} + +TEST(InvokeTest, DataMember) { + std::unique_ptr p(new Class{42}); + std::unique_ptr cp(new Class{42}); + EXPECT_EQ(42, Invoke(&Class::member, p)); + EXPECT_EQ(42, Invoke(&Class::member, *p)); + EXPECT_EQ(42, Invoke(&Class::member, p.get())); + + Invoke(&Class::member, p) = 42; + Invoke(&Class::member, p.get()) = 42; + + EXPECT_EQ(42, Invoke(&Class::member, cp)); + EXPECT_EQ(42, Invoke(&Class::member, *cp)); + EXPECT_EQ(42, Invoke(&Class::member, cp.get())); +} + +TEST(InvokeTest, FlipFlop) { + FlipFlop obj = {42}; + // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or + // ((*obj).*&FlipFlop::ConstMethod)(). 
We verify that it's the former. + EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj)); + EXPECT_EQ(42, Invoke(&FlipFlop::member, obj)); +} + +TEST(InvokeTest, SfinaeFriendly) { + CallMaybeWithArg(NoOp); + EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); +} + +} // namespace +} // namespace base_internal +} // namespace absl diff --git a/absl/base/macros.h b/absl/base/macros.h new file mode 100644 index 000000000..259970fea --- /dev/null +++ b/absl/base/macros.h @@ -0,0 +1,201 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: macros.h +// ----------------------------------------------------------------------------- +// +// This header file defines the set of language macros used within Abseil code. +// For the set of macros used to determine supported compilers and platforms, +// see absl/base/config.h instead. +// +// This code is compiled directly on many platforms, including client +// platforms like Windows, Mac, and embedded systems. Before making +// any changes here, make sure that you're not breaking any platforms. +// + +#ifndef ABSL_BASE_MACROS_H_ +#define ABSL_BASE_MACROS_H_ + +#include + +#include "absl/base/port.h" + +// ABSL_ARRAYSIZE() +// +// Returns the # of elements in an array as a compile-time constant, which can +// be used in defining new arrays. If you use this macro on a pointer by +// mistake, you will get a compile-time error. +// +// Note: this template function declaration is used in defining arraysize. +// Note that the function doesn't need an implementation, as we only +// use its type. +namespace absl { +namespace macros_internal { +template +char (&ArraySizeHelper(T (&array)[N]))[N]; +} // namespace macros_internal +} // namespace absl +#define ABSL_ARRAYSIZE(array) \ + (sizeof(::absl::macros_internal::ArraySizeHelper(array))) + +// kLinkerInitialized +// +// An enum used only as a constructor argument to indicate that a variable has +// static storage duration, and that the constructor should do nothing to its +// state. Use of this macro indicates to the reader that it is legal to +// declare a static instance of the class, provided the constructor is given +// the absl::base_internal::kLinkerInitialized argument. +// +// Normally, it is unsafe to declare a static variable that has a constructor or +// a destructor because invocation order is undefined. However, if the type can +// be zero-initialized (which the loader does for static variables) into a valid +// state and the type's destructor does not affect storage, then a constructor +// for static initialization can be declared. 
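The `CallMaybeWithArg` helper above relies on expression SFINAE: naming the candidate call in the return type removes an overload from consideration when the call would be ill-formed, instead of producing a hard error. A reduced standalone sketch of the same trick (names are illustrative, not Abseil APIs):

// Overload chosen when f() is well-formed.
template <typename F>
auto CallWithBestArity(const F& f) -> decltype(f()) {
  return f();
}

// Overload chosen when f(42) is well-formed.  For any given F, exactly one
// of the two decltype expressions should be valid; if both are, the call
// is ambiguous, just as it would be for CallMaybeWithArg.
template <typename F>
auto CallWithBestArity(const F& f) -> decltype(f(42)) {
  return f(42);
}

// CallWithBestArity([] { return 1; });        // picks the zero-argument form
// CallWithBestArity([](int x) { return x; }); // picks the one-argument form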
diff --git a/absl/base/macros.h b/absl/base/macros.h
new file mode 100644
index 000000000..259970fea
--- /dev/null
+++ b/absl/base/macros.h
@@ -0,0 +1,201 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems.  Before making
+// any changes here, make sure that you're not breaking any platforms.
+//
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays.  If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+//
+// Note: this template function declaration is used in defining arraysize;
+// the function doesn't need an implementation, as we only use its type.
+namespace absl {
+namespace macros_internal {
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+}  // namespace macros_internal
+}  // namespace absl
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
+
+// kLinkerInitialized
+//
+// An enum used only as a constructor argument to indicate that a variable has
+// static storage duration, and that the constructor should do nothing to its
+// state.  Use of this macro indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the absl::base_internal::kLinkerInitialized argument.
+//
+// Normally, it is unsafe to declare a static variable that has a constructor
+// or a destructor because invocation order is undefined.  However, if the type
+// can be zero-initialized (which the loader does for static variables) into a
+// valid state and the type's destructor does not affect storage, then a
+// constructor for static initialization can be declared.
+//
+// Example:
+//       // Declaration
+//       explicit MyClass(absl::base_internal::LinkerInitialized x) {}
+//
+//       // Invocation
+//       static MyClass my_global(absl::base_internal::kLinkerInitialized);
namespace absl {
+namespace base_internal {
+enum LinkerInitialized {
+  kLinkerInitialized = 0,
+};
+}  // namespace base_internal
+}  // namespace absl
+
+// ABSL_FALLTHROUGH_INTENDED
+//
+// Annotates implicit fall-through between switch labels, allowing a case to
+// indicate intentional fallthrough and turn off warnings about any lack of a
+// `break` statement.  The ABSL_FALLTHROUGH_INTENDED macro should be followed
+// by a semicolon and can be used in most places where `break` can, provided
+// that no statements exist between it and the next switch label.
+//
+// Example:
+//
+//  switch (x) {
+//    case 40:
+//    case 41:
+//      if (truth_is_out_there) {
+//        ++x;
+//        ABSL_FALLTHROUGH_INTENDED;  // Use instead of/along with annotations
+//                                    // in comments
+//      } else {
+//        return x;
+//      }
+//    case 42:
+//      ...
+//
+// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
+// macro is expanded to the [[clang::fallthrough]] attribute, which is analyzed
+// when performing the switch-label fall-through diagnostic
+// (`-Wimplicit-fallthrough`).  See clang documentation on language extensions
+// for details:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+//
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
+// has no effect on diagnostics.  In any case this macro has no effect on
+// runtime behavior or the performance of code.
+#ifdef ABSL_FALLTHROUGH_INTENDED
+#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
+#endif
+
+// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
+#if defined(__clang__) && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#endif
+#elif defined(__GNUC__) && __GNUC__ >= 7
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#endif
+
+#ifndef ABSL_FALLTHROUGH_INTENDED
+#define ABSL_FALLTHROUGH_INTENDED \
+  do {                            \
+  } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks deprecated class, struct, enum, function, method, and variable
+// declarations.  The macro argument is used as a custom diagnostic message
+// (e.g. a suggestion of a better alternative).
+//
+// Example:
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//   ABSL_DEPRECATED("Use Baz instead") void Bar() {...}
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// clang's `-Wdeprecated-declarations` option.  This option is turned off by
+// default, but the warnings will be reported by go/clang-tidy.
+#if defined(__clang__) && __cplusplus >= 201103L && defined(__has_warning)
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#endif
+
+#ifndef ABSL_DEPRECATED
+#define ABSL_DEPRECATED(message)
+#endif
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error.  This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// http://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//       ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                        "'c' must have the value of an unsigned char or EOF");
+//   #endif // ABSL_BAD_CALL_IF
+
+#if defined(__clang__)
+# if __has_attribute(enable_if)
+#  define ABSL_BAD_CALL_IF(expr, msg) \
+    __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+# endif
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions.  Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) (false ? (void)(expr) : (void)0)
+#else
+#define ABSL_ASSERT(expr) \
+  (ABSL_PREDICT_TRUE((expr)) ? (void)0 : [] { assert(false && #expr); }())
+#endif
+
+#endif  // ABSL_BASE_MACROS_H_
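A usage sketch for ABSL_ARRAYSIZE (values illustrative): because the helper's return type is a reference to `char[N]`, `sizeof` yields the element count at compile time, and a pointer argument fails template deduction instead of silently dividing byte sizes:

#include "absl/base/macros.h"

static const int kPrimes[] = {2, 3, 5, 7, 11};
static_assert(ABSL_ARRAYSIZE(kPrimes) == 5, "element count, not byte count");

// const int* p = kPrimes;
// ABSL_ARRAYSIZE(p);  // error: N cannot be deduced from a pointer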
diff --git a/absl/base/optimization.h b/absl/base/optimization.h
new file mode 100644
index 000000000..db8beafa3
--- /dev/null
+++ b/absl/base/optimization.h
@@ -0,0 +1,164 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: optimization.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines portable macros for performance optimization.
+
+#ifndef ABSL_BASE_OPTIMIZATION_H_
+#define ABSL_BASE_OPTIMIZATION_H_
+
+#include "absl/base/config.h"
+
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
+//
+// Instructs the compiler to avoid optimizing tail-call recursion.  Use of this
+// macro is useful when you wish to preserve the existing function order within
+// a stack trace for logging, debugging, or profiling purposes.
+//
+// Example:
+//
+//   int f() {
+//     int result = g();
+//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+//     return result;
+//   }
+#if defined(__pnacl__)
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#elif defined(__clang__)
+// Clang will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(__GNUC__)
+// GCC will not tail call given inline volatile assembly.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
+#elif defined(_MSC_VER)
+// The __nop() intrinsic blocks the optimization.
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
+#else
+#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
+#endif
+
+// ABSL_CACHELINE_SIZE
+//
+// Explicitly defines the size of the L1 cache for purposes of alignment.
+// Setting the cacheline size allows you to specify that certain objects be
+// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
+// (See below.)
+//
+// NOTE: this macro should be replaced with the following C++17 features, when
+// those are generally available:
+//
+//   * `std::hardware_constructive_interference_size`
+//   * `std::hardware_destructive_interference_size`
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+#if defined(__GNUC__)
+// Cache line alignment
+#if defined(__i386__) || defined(__x86_64__)
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__powerpc64__)
+#define ABSL_CACHELINE_SIZE 128
+#elif defined(__aarch64__)
+// We would need to read the special register ctr_el0 to find out the L1 dcache
+// size.  This value is a good estimate based on a real aarch64 machine.
+#define ABSL_CACHELINE_SIZE 64
+#elif defined(__arm__)
+// Cache line sizes for ARM: These values are not strictly correct since
+// cache line sizes depend on implementations, not architectures.  There
+// are even implementations with cache line sizes configurable at boot
+// time.
+#if defined(__ARM_ARCH_5T__)
+#define ABSL_CACHELINE_SIZE 32
+#elif defined(__ARM_ARCH_7A__)
+#define ABSL_CACHELINE_SIZE 64
+#endif
+#endif
+
+#ifndef ABSL_CACHELINE_SIZE
+// A reasonable default guess.  Note that overestimates tend to waste more
+// space, while underestimates tend to waste more time.
+#define ABSL_CACHELINE_SIZE 64
+#endif
+
+// ABSL_CACHELINE_ALIGNED
+//
+// Indicates that the declared object should be cache-aligned using
+// `ABSL_CACHELINE_SIZE` (see above).  Cacheline aligning objects allows you to
+// load a set of related objects in the L1 cache for performance improvements.
+// Cacheline aligning objects properly allows constructive memory sharing and
+// prevents destructive (or "false") memory sharing.
+//
+// NOTE: this macro should be replaced with usage of `alignas()` using
+// `std::hardware_constructive_interference_size` and/or
+// `std::hardware_destructive_interference_size` when available within C++17.
+//
+// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
+// for more information.
+//
+// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to
+// `__attribute__((aligned(ABSL_CACHELINE_SIZE)))`.  For compilers where this
+// is not known to work, the macro expands to nothing.
+//
+// No further guarantees are made here.  The result of applying the macro
+// to variables and types is always implementation-defined.
+//
+// WARNING: It is easy to use this attribute incorrectly, even to the point
+// of causing bugs that are difficult to diagnose, crash, etc.  It does not
+// of itself guarantee that objects are aligned to a cache line.
+//
+// Recommendations:
+//
+// 1) Consult compiler documentation; this comment is not kept in sync as
+//    toolchains evolve.
+// 2) Verify your use has the intended effect.  This often requires inspecting
+//    the generated machine code.
+// 3) Prefer applying this attribute to individual variables.  Avoid
+//    applying it to types.  This tends to localize the effect.
+#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
+
+#else  // not GCC
+#define ABSL_CACHELINE_SIZE 64
+#define ABSL_CACHELINE_ALIGNED
+#endif
+
+// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
+//
+// Enables the compiler to prioritize compilation using static analysis for
+// likely paths within a boolean branch.
+//
+// Example:
+//
+//   if (ABSL_PREDICT_TRUE(expression)) {
+//     return result;                        // Faster if more likely
+//   } else {
+//     return 0;
+//   }
+//
+// Compilers can use the information that a certain branch is not likely to be
+// taken (for instance, a CHECK failure) to optimize for the common case in
+// the absence of better information (i.e., compiling gcc with
+// `-fprofile-arcs`).
+#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define ABSL_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define ABSL_PREDICT_FALSE(x) x
+#define ABSL_PREDICT_TRUE(x) x
+#endif
+
+#endif  // ABSL_BASE_OPTIMIZATION_H_
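A sketch of the intended use of ABSL_CACHELINE_ALIGNED (the struct is illustrative): two counters updated by different threads are forced onto separate cache lines so the writers do not false-share. Note the header's own guidance prefers applying the attribute to individual variables; it is applied to members here only for brevity:

#include <atomic>
#include <cstdint>
#include "absl/base/optimization.h"

struct Counters {
  // Each member starts on its own cache line; without the alignment both
  // atomics would likely share one line and contend on every update.
  ABSL_CACHELINE_ALIGNED std::atomic<uint64_t> produced{0};
  ABSL_CACHELINE_ALIGNED std::atomic<uint64_t> consumed{0};
};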
diff --git a/absl/base/policy_checks.h b/absl/base/policy_checks.h
new file mode 100644
index 000000000..17c05c146
--- /dev/null
+++ b/absl/base/policy_checks.h
@@ -0,0 +1,99 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: policy_checks.h
+// -----------------------------------------------------------------------------
+//
+// This header enforces a minimum set of policies at build time, such as the
+// supported compiler and library versions.  Unsupported configurations are
+// reported with `#error`.  This enforcement is best effort, so successfully
+// compiling this header does not guarantee a supported configuration.
+
+#ifndef ABSL_BASE_POLICY_CHECKS_H_
+#define ABSL_BASE_POLICY_CHECKS_H_
+
+// Included for the __GLIBC_PREREQ macro used below.
+#include <limits.h>
+
+// Included for the _STLPORT_VERSION macro used below.
+#if defined(__cplusplus)
+#include <cstddef>
+#endif
+
+// -----------------------------------------------------------------------------
+// Operating System Check
+// -----------------------------------------------------------------------------
+
+#if defined(__CYGWIN__)
+#error "Cygwin is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// Compiler Check
+// -----------------------------------------------------------------------------
+
+// We support MSVC++ 14.0 update 2 and later.
+// This minimum will go up.
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918
+#error "This package requires Visual Studio 2015 Update 2 or higher"
+#endif
+
+// We support gcc 4.7 and later.
+// This minimum will go up.
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#error "This package requires gcc 4.7 or higher"
+#endif
+#endif
+
+// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
+// This corresponds to Apple Xcode version 4.5.
+// This minimum will go up.
+#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
+#error "This package requires __apple_build_version__ of 4211165 or higher"
+#endif
+
+// -----------------------------------------------------------------------------
+// C++ Version Check
+// -----------------------------------------------------------------------------
+
+// Enforce C++11 as the minimum.  Note that Visual Studio has not
+// advanced __cplusplus despite being good enough for our purposes,
+// so we exempt it from the check.
+#if defined(__cplusplus) && !defined(_MSC_VER)
+#if __cplusplus < 201103L
+#error "C++ versions less than C++11 are not supported."
+#endif
+#endif
+
+// -----------------------------------------------------------------------------
+// Standard Library Check
+// -----------------------------------------------------------------------------
+
+// We have chosen glibc 2.12 as the minimum, as it was tagged for release
+// in May 2010 and includes some functionality used in Google software
+// (for instance pthread_setname_np):
+// https://sourceware.org/ml/libc-alpha/2010-05/msg00000.html
+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 12)
+#error "Minimum required version of glibc is 2.12."
+#endif
+#endif
+
+#if defined(_STLPORT_VERSION)
+#error "STLPort is not supported."
+#endif
+
+#endif  // ABSL_BASE_POLICY_CHECKS_H_
diff --git a/absl/base/port.h b/absl/base/port.h
new file mode 100644
index 000000000..1c67257fd
--- /dev/null
+++ b/absl/base/port.h
@@ -0,0 +1,26 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a forwarding header for other headers containing various
+// portability macros and functions.
+// This file is used for both C and C++!
+
+#ifndef ABSL_BASE_PORT_H_
+#define ABSL_BASE_PORT_H_
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+
+#endif  // ABSL_BASE_PORT_H_
+ +#include "absl/base/internal/raw_logging.h" + +#include "gtest/gtest.h" + +namespace { + +TEST(RawLoggingCompilationTest, Log) { + ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); + ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); +} + +TEST(RawLoggingCompilationTest, PassingCheck) { + ABSL_RAW_CHECK(true, "RAW CHECK"); +} + +// Not all platforms support output from raw log, so we don't verify any +// particular output for RAW check failures (expecting the empty std::string +// accomplishes this). This test is primarily a compilation test, but we +// are verifying process death when EXPECT_DEATH works for a platform. +const char kExpectedDeathOutput[] = ""; + +TEST(RawLoggingDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(RawLoggingDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + +} // namespace diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc new file mode 100644 index 000000000..b1212eaf7 --- /dev/null +++ b/absl/base/spinlock_test_common.cc @@ -0,0 +1,265 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A bunch of threads repeatedly hash an array of ints protected by a +// spinlock. If the spinlock is working properly, all elements of the +// array should be equal at the end of the test. + +#include +#include +#include +#include // NOLINT(build/c++11) +#include + +#include "gtest/gtest.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/internal/sysinfo.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/notification.h" + +constexpr int32_t kNumThreads = 10; +constexpr int32_t kIters = 1000; + +namespace absl { +namespace base_internal { + +// This is defined outside of anonymous namespace so that it can be +// a friend of SpinLock to access protected methods for testing. +struct SpinLockTest { + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time) { + return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time); + } + static uint64_t DecodeWaitCycles(uint32_t lock_value) { + return SpinLock::DecodeWaitCycles(lock_value); + } +}; + +namespace { + +static constexpr int kArrayLength = 10; +static uint32_t values[kArrayLength]; + +static SpinLock static_spinlock(base_internal::kLinkerInitialized); +static SpinLock static_cooperative_spinlock( + base_internal::kLinkerInitialized, + base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); +static SpinLock static_noncooperative_spinlock( + base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY); + +// Simple integer hash function based on the public domain lookup2 hash. +// http://burtleburtle.net/bob/c/lookup2.c +static uint32_t Hash32(uint32_t a, uint32_t c) { + uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value. 
+  a -= b; a -= c; a ^= (c >> 13);
+  b -= c; b -= a; b ^= (a << 8);
+  c -= a; c -= b; c ^= (b >> 13);
+  a -= b; a -= c; a ^= (c >> 12);
+  b -= c; b -= a; b ^= (a << 16);
+  c -= a; c -= b; c ^= (b >> 5);
+  a -= b; a -= c; a ^= (c >> 3);
+  b -= c; b -= a; b ^= (a << 10);
+  c -= a; c -= b; c ^= (b >> 15);
+  return c;
+}
+
+static void TestFunction(int thread_salt, SpinLock* spinlock) {
+  for (int i = 0; i < kIters; i++) {
+    SpinLockHolder h(spinlock);
+    for (int j = 0; j < kArrayLength; j++) {
+      const int index = (j + thread_salt) % kArrayLength;
+      values[index] = Hash32(values[index], thread_salt);
+      std::this_thread::yield();
+    }
+  }
+}
+
+static void ThreadedTest(SpinLock* spinlock) {
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(TestFunction, i, spinlock));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  SpinLockHolder h(spinlock);
+  for (int i = 1; i < kArrayLength; i++) {
+    EXPECT_EQ(values[0], values[i]);
+  }
+}
+
+TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  spinlock.Unlock();
+}
+
+TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
+  static_noncooperative_spinlock.Lock();
+  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
+  static_noncooperative_spinlock.Unlock();
+}
+
+TEST(SpinLock, WaitCyclesEncoding) {
+  // These are implementation details not exported by SpinLock.
+  const int kProfileTimestampShift = 7;
+  const int kLockwordReservedShift = 3;
+  const uint32_t kSpinLockSleeper = 8;
+
+  // We should be able to encode up to (2^kMaxCyclesShift - 1) without
+  // clamping, but the lower kProfileTimestampShift bits will be dropped.
+  const int kMaxCyclesShift =
+      32 - kLockwordReservedShift + kProfileTimestampShift;
+  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+
+  // These bits should be zero after encoding.
+  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
+
+  // These bits are dropped when wait cycles are encoded.
+  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+
+  // Test a bunch of random values.
+  std::default_random_engine generator;
+  // Shift to avoid overflow below.
+  std::uniform_int_distribution<int64_t> time_distribution(
+      0, std::numeric_limits<int64_t>::max() >> 4);
+  std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
+
+  for (int i = 0; i < 100; i++) {
+    int64_t start_time = time_distribution(generator);
+    int64_t cycles = cycle_distribution(generator);
+    int64_t end_time = start_time + cycles;
+    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
+    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
+    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+    EXPECT_EQ(0, decoded & kProfileTimestampMask);
+    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
+  }
+
+  // Test corner cases.
+  int64_t start_time = time_distribution(generator);
+  EXPECT_EQ(0, SpinLockTest::EncodeWaitCycles(start_time, start_time));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
+  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
+  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
+            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
+
+  // Check that we cannot produce kSpinLockSleeper during encoding.
+  int64_t sleeper_cycles =
+      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
+  uint32_t sleeper_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
+  EXPECT_NE(sleeper_value, kSpinLockSleeper);
+
+  // Test clamping.
+  uint32_t max_value =
+      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
+  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
+
+  const int64_t step = (1 << kProfileTimestampShift);
+  uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(
+      start_time, start_time + kMaxCycles + step);
+  uint64_t after_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(after_max_value);
+  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
+
+  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
+      start_time, start_time + kMaxCycles - step);
+  uint64_t before_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(before_max_value);
+  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
+}
+
+TEST(SpinLockWithThreads, StaticSpinLock) {
+  ThreadedTest(&static_spinlock);
+}
+
+TEST(SpinLockWithThreads, StackSpinLock) {
+  SpinLock spinlock;
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
+  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  ThreadedTest(&spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
+  ThreadedTest(&static_cooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
+  ThreadedTest(&static_noncooperative_spinlock);
+}
+
+TEST(SpinLockWithThreads, DoesNotDeadlock) {
+  struct Helper {
+    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
+                               BlockingCounter* b) {
+      // Wait for LockThenWait() to hold "spinlock".
+      locked->WaitForNotification();
+      b->DecrementCount();
+      SpinLockHolder l(spinlock);
+    }
+
+    static void LockThenWait(Notification* locked, SpinLock* spinlock,
+                             BlockingCounter* b) {
+      SpinLockHolder l(spinlock);
+      locked->Notify();
+      b->Wait();
+    }
+
+    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
+      Notification locked;
+      BlockingCounter counter(num_spinners);
+      std::vector<std::thread> threads;
+
+      threads.push_back(
+          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
+      for (int i = 0; i < num_spinners; ++i) {
+        threads.push_back(
+            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
+      }
+
+      for (auto& thread : threads) {
+        thread.join();
+      }
+    }
+  };
+
+  SpinLock stack_cooperative_spinlock(
+      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  Helper::DeadlockTest(&stack_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&stack_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+}
+
+}  // namespace
+}  // namespace base_internal
+}  // namespace absl
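A worked illustration of the wait-cycle encoding that the WaitCyclesEncoding test above verifies. This is a sketch only: it assumes the shift constants stated in the test, and it omits both the clamping and the special-casing that keeps the real encoder in spinlock.cc from producing the kSpinLockSleeper value.

#include <cassert>
#include <cstdint>

constexpr int kProfileTimestampShift = 7;  // low cycle bits dropped on encode
constexpr int kLockwordReservedShift = 3;  // lockword bits reserved for state

// Store bits [7, 36) of the cycle count above the reserved lockword bits.
uint32_t EncodeSketch(int64_t wait_cycles) {
  return static_cast<uint32_t>(
      (wait_cycles >> kProfileTimestampShift) << kLockwordReservedShift);
}

uint64_t DecodeSketch(uint32_t lock_value) {
  return static_cast<uint64_t>(lock_value >> kLockwordReservedShift)
         << kProfileTimestampShift;
}

int main() {
  // 1000 cycles encode to (1000 >> 7) << 3 == 56 and decode back to 896,
  // i.e. 1000 rounded down to a multiple of 2^7 == 128. This matches the
  // EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded) assertion above.
  assert(EncodeSketch(1000) == 56);
  assert(DecodeSketch(EncodeSketch(1000)) == 896);
  return 0;
}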
diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h
new file mode 100644
index 000000000..025a8548d
--- /dev/null
+++ b/absl/base/thread_annotations.h
@@ -0,0 +1,247 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the
+// macros defined here instead of raw attributes allows for portability and
+// future compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+//
+
+#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_THREAD_ANNOTATIONS_H_
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared variable/field needs to be protected by a mutex.
+// GUARDED_BY() allows the user to specify a particular mutex that should be
+// held when accessing the annotated variable.
+//
+// Example:
+//
+//   Mutex mu;
+//   int p1 GUARDED_BY(mu);
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   Mutex mu;
+//   int *p1 PT_GUARDED_BY(mu);
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//   // `q`, guarded by `mu1`, points to a shared memory location that is
+//   // guarded by `mu2`:
+//   int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded)
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// Example:
+//
+//   Mutex m1;
+//   Mutex m2 ACQUIRED_AFTER(m1);
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a GUARDED_BY(mu1);
+//   int b GUARDED_BY(mu2);
+//
+//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) \
+  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE \
+  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or
+// failure (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and
+// fail if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// whose locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing a thread safety
+// warning. It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // ABSL_BASE_THREAD_ANNOTATIONS_H_
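Taken together, the macros above are typically combined along these lines. A minimal sketch only: `Mutex` here is a hypothetical stand-in declared just to carry the attributes, not Abseil's real mutex type.

// Hypothetical lockable type; real code would use an annotated mutex class.
class LOCKABLE Mutex {
 public:
  void Lock() EXCLUSIVE_LOCK_FUNCTION();
  void Unlock() UNLOCK_FUNCTION();
};

class Counter {
 public:
  // Callers must not hold mu_; Increment() acquires it internally.
  void Increment() LOCKS_EXCLUDED(mu_) {
    mu_.Lock();
    ++value_;  // OK: mu_ is held here.
    mu_.Unlock();
  }

  // Callers must already hold mu_ when calling this.
  int GetLocked() const EXCLUSIVE_LOCKS_REQUIRED(mu_) { return value_; }

 private:
  mutable Mutex mu_;
  int value_ GUARDED_BY(mu_);  // the analysis flags unlocked access
};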
diff --git a/absl/base/throw_delegate_test.cc b/absl/base/throw_delegate_test.cc
new file mode 100644
index 000000000..f9494452e
--- /dev/null
+++ b/absl/base/throw_delegate_test.cc
@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::base_internal::ThrowStdLogicError;
+using absl::base_internal::ThrowStdInvalidArgument;
+using absl::base_internal::ThrowStdDomainError;
+using absl::base_internal::ThrowStdLengthError;
+using absl::base_internal::ThrowStdOutOfRange;
+using absl::base_internal::ThrowStdRuntimeError;
+using absl::base_internal::ThrowStdRangeError;
+using absl::base_internal::ThrowStdOverflowError;
+using absl::base_internal::ThrowStdUnderflowError;
+using absl::base_internal::ThrowStdBadFunctionCall;
+using absl::base_internal::ThrowStdBadAlloc;
+
+constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
+
+template <typename E>
+void ExpectThrowChar(void (*f)(const char*)) {
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+}
+
+template <typename E>
+void ExpectThrowString(void (*f)(const std::string&)) {
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+}
+
+template <typename E>
+void ExpectThrowNoWhat(void (*f)()) {
+  try {
+    f();
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+  }
+}
+
+TEST(ThrowHelper, Test) {
+  // Not using EXPECT_THROW because we want to check the .what() message too.
+  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
+  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+  ExpectThrowString<std::length_error>(ThrowStdLengthError);
+  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+  ExpectThrowString<std::range_error>(ThrowStdRangeError);
+  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+
+  ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
+  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
+}
+
+}  // namespace
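For context on how these delegates get used, the pattern appears later in this same patch: fixed_array.h reports a failed bounds check by calling the out-of-line delegate rather than writing `throw` at the call site. Excerpt reproduced from the fixed_array.h hunk below:

// Excerpt from absl/container/fixed_array.h later in this commit:
reference at(size_type i) {
  if (ABSL_PREDICT_FALSE(i >= size())) {
    base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
  }
  return data()[i];
}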
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
new file mode 100644
index 000000000..625cef106
--- /dev/null
+++ b/absl/container/BUILD.bazel
@@ -0,0 +1,124 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_TEST_COPTS",
+)
+load(
+    "//absl:test_dependencies.bzl",
+    "GUNIT_MAIN_DEPS_SELECTOR",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache 2.0
+
+cc_library(
+    name = "fixed_array",
+    hdrs = ["fixed_array.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        "//absl/algorithm",
+        "//absl/base:core_headers",
+        "//absl/base:dynamic_annotations",
+        "//absl/base:throw_delegate",
+    ],
+)
+
+cc_test(
+    name = "fixed_array_test",
+    srcs = ["fixed_array_test.cc"],
+    copts = ABSL_TEST_COPTS + ["-fexceptions"],
+    deps = [
+        ":fixed_array",
+        "//absl/base:core_headers",
+        "//absl/base:exception_testing",
+        "//absl/memory",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "fixed_array_test_noexceptions",
+    srcs = ["fixed_array_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":fixed_array",
+        "//absl/base:core_headers",
+        "//absl/base:exception_testing",
+        "//absl/memory",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "inlined_vector",
+    hdrs = ["inlined_vector.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        "//absl/algorithm",
+        "//absl/base:core_headers",
+        "//absl/base:throw_delegate",
+        "//absl/memory",
+    ],
)
+
+cc_test(
+    name = "inlined_vector_test",
+    srcs = ["inlined_vector_test.cc"],
+    copts = ABSL_TEST_COPTS + ["-fexceptions"],
+    deps = [
+        ":inlined_vector",
+        ":test_instance_tracker",
+        "//absl/base",
+        "//absl/base:core_headers",
+        "//absl/base:exception_testing",
+        "//absl/memory",
+        "//absl/strings",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+    name = "inlined_vector_test_noexceptions",
+    srcs = ["inlined_vector_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":inlined_vector",
+        ":test_instance_tracker",
+        "//absl/base",
+        "//absl/base:core_headers",
+        "//absl/base:exception_testing",
+        "//absl/memory",
+        "//absl/strings",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+    name = "test_instance_tracker",
+    testonly = 1,
+    srcs = ["internal/test_instance_tracker.cc"],
+    hdrs = ["internal/test_instance_tracker.h"],
+    copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_test(
+    name = "test_instance_tracker_test",
+    srcs = ["internal/test_instance_tracker_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":test_instance_tracker",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
new file mode 100644
index 000000000..20bde2728
--- /dev/null
+++ b/absl/container/fixed_array.h
@@ -0,0 +1,493 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length
+// of the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable-length arrays
+// (a GCC extension). (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
+
+#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
+#define ABSL_CONTAINER_FIXED_ARRAY_H_
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating small arrays
+// inline for efficiency and correctness.
+//
+// Most users should not specify an `inline_elements` argument and let
+// `FixedArray<>` automatically determine the number of elements
+// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
+// `FixedArray<>` implementation will inline arrays of
+// length <= `inline_elements`.
+//
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of C-style arrays and `std::array`, but not
+// `std::vector`.
+//
+// Note that `FixedArray` does not provide a public allocator; if it requires a
+// heap allocation, it will do so with global `::operator new[]()` and
+// `::operator delete[]()`, even if T provides class-scope overrides for these
+// operators.
+template <typename T, size_t inlined = kFixedArrayUseDefault>
+class FixedArray {
+  static constexpr size_t kInlineBytesDefault = 256;
+
+  // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+  // but this seems to be mostly pedantic.
+  template <typename Iter>
+  using EnableIfForwardIterator = typename std::enable_if<
+      std::is_convertible<
+          typename std::iterator_traits<Iter>::iterator_category,
+          std::forward_iterator_tag>::value,
+      int>::type;
+
+ public:
+  // For playing nicely with stl:
+  using value_type = T;
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using reference = T&;
+  using const_reference = const T&;
+  using pointer = T*;
+  using const_pointer = const T*;
+  using difference_type = ptrdiff_t;
+  using size_type = size_t;
+
+  static constexpr size_type inline_elements =
+      inlined == kFixedArrayUseDefault
+          ? kInlineBytesDefault / sizeof(value_type)
+          : inlined;
+
+  // Creates an array object that can store `n` elements.
+  // Note that trivially constructible elements will be uninitialized.
+  explicit FixedArray(size_type n) : rep_(n) {}
+
+  // Creates an array initialized with `n` copies of `val`.
+  FixedArray(size_type n, const value_type& val) : rep_(n, val) {}
+
+  // Creates an array initialized with the elements from the input
+  // range. The array's size will always be `std::distance(first, last)`.
+  // REQUIRES: Iter must be a forward_iterator or better.
+  template <typename Iter, EnableIfForwardIterator<Iter> = 0>
+  FixedArray(Iter first, Iter last) : rep_(first, last) {}
+
+  // Creates the array from an initializer_list.
+  FixedArray(std::initializer_list<value_type> init_list)
+      : FixedArray(init_list.begin(), init_list.end()) {}
+
+  ~FixedArray() {}
+
+  // Copy and move construction and assignment are deleted because (1) you
+  // can't copy or move an array, (2) assignment breaks the invariant that the
+  // size of a `FixedArray` never changes, and (3) there's no clear answer as
+  // to what should happen to a moved-from `FixedArray`.
+  FixedArray(const FixedArray&) = delete;
+  void operator=(const FixedArray&) = delete;
+
+  // FixedArray::size()
+  //
+  // Returns the length of the fixed array.
+  size_type size() const { return rep_.size(); }
+
+  // FixedArray::max_size()
+  //
+  // Returns the largest possible value of `std::distance(begin(), end())` for
+  // a `FixedArray<T>`. This is equivalent to the maximum number of addressable
+  // bytes divided by `sizeof(value_type)`.
+  constexpr size_type max_size() const {
+    return std::numeric_limits<difference_type>::max() / sizeof(value_type);
+  }
+
+  // FixedArray::empty()
+  //
+  // Returns whether or not the fixed array is empty.
+  bool empty() const { return size() == 0; }
+
+  // FixedArray::memsize()
+  //
+  // Returns the memory size of the fixed array in bytes.
+  size_t memsize() const { return size() * sizeof(value_type); }
+
+  // FixedArray::data()
+  //
+  // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+  // can be used to access (but not modify) the contained elements.
+  const_pointer data() const { return AsValue(rep_.begin()); }
+
+  // Overload of FixedArray::data() to return a T* pointer to elements of the
+  // fixed array. This pointer can be used to access and modify the contained
+  // elements.
+  pointer data() { return AsValue(rep_.begin()); }
+
+  // FixedArray::operator[]
+  //
+  // Returns a reference to the ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  reference operator[](size_type i) {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // Overload of FixedArray::operator[] to return a const reference to the
+  // ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  const_reference operator[](size_type i) const {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // FixedArray::at()
+  //
+  // Bounds-checked access. Returns a reference to the ith element of the
+  // fixed array, or throws std::out_of_range.
+  reference at(size_type i) {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // Overload of FixedArray::at() to return a const reference to the ith
+  // element of the fixed array.
+  const_reference at(size_type i) const {
+    if (i >= size()) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // FixedArray::front()
+  //
+  // Returns a reference to the first element of the fixed array.
+  reference front() { return *begin(); }
+
+  // Overload of FixedArray::front() to return a const reference to the first
+  // element of a fixed array of const values.
+  const_reference front() const { return *begin(); }
+
+  // FixedArray::back()
+  //
+  // Returns a reference to the last element of the fixed array.
+  reference back() { return *(end() - 1); }
+
+  // Overload of FixedArray::back() to return a const reference to the last
+  // element of a fixed array of const values.
+  const_reference back() const { return *(end() - 1); }
+
+  // FixedArray::begin()
+  //
+  // Returns an iterator to the beginning of the fixed array.
+  iterator begin() { return data(); }
+
+  // Overload of FixedArray::begin() to return a const iterator to the
+  // beginning of the fixed array.
+  const_iterator begin() const { return data(); }
+
+  // FixedArray::cbegin()
+  //
+  // Returns a const iterator to the beginning of the fixed array.
+  const_iterator cbegin() const { return begin(); }
+
+  // FixedArray::end()
+  //
+  // Returns an iterator to the end of the fixed array.
+  iterator end() { return data() + size(); }
+
+  // Overload of FixedArray::end() to return a const iterator to the end of
+  // the fixed array.
+  const_iterator end() const { return data() + size(); }
+
+  // FixedArray::cend()
+  //
+  // Returns a const iterator to the end of the fixed array.
+  const_iterator cend() const { return end(); }
+
+  // FixedArray::rbegin()
+  //
+  // Returns a reverse iterator from the end of the fixed array.
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+
+  // Overload of FixedArray::rbegin() to return a const reverse iterator from
+  // the end of the fixed array.
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+
+  // FixedArray::crbegin()
+  //
+  // Returns a const reverse iterator from the end of the fixed array.
+  const_reverse_iterator crbegin() const { return rbegin(); }
+
+  // FixedArray::rend()
+  //
+  // Returns a reverse iterator from the beginning of the fixed array.
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+
+  // Overload of FixedArray::rend() for returning a const reverse iterator
+  // from the beginning of the fixed array.
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  // FixedArray::crend()
+  //
+  // Returns a const reverse iterator from the beginning of the fixed array.
+  const_reverse_iterator crend() const { return rend(); }
+
+  // FixedArray::fill()
+  //
+  // Assigns the given `value` to all elements in the fixed array.
+  void fill(const T& value) { std::fill(begin(), end(), value); }
+
+  // Relational operators. Equality operators are elementwise using
+  // `operator==`, while order operators order FixedArrays lexicographically.
+  friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+    return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+  }
+
+  friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(lhs == rhs);
+  }
+
+  friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+    return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+                                        rhs.end());
+  }
+
+  friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+    return rhs < lhs;
+  }
+
+  friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(rhs < lhs);
+  }
+
+  friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(lhs < rhs);
+  }
+
+ private:
+  // HolderTraits
+  //
+  // Wrapper to hold elements of type T for the case where T is an array type.
+  // If 'T' is an array type, HolderTraits::type is a struct with a 'T v;'.
+  // Otherwise, HolderTraits::type is simply 'T'.
+  //
+  // Maintainer's Note: The simpler solution would be to simply wrap T in a
+  // struct whether it's an array or not: 'struct Holder { T v; };', but
+  // that causes some paranoid diagnostics to misfire about uses of data(),
+  // believing that 'data()' (aka '&rep_.begin().v') is a pointer to a single
+  // element, rather than the packed array that it really is.
+  // e.g.:
+  //
+  //     FixedArray<char> buf(1);
+  //     sprintf(buf.data(), "foo");
+  //
+  //     error: call to int __builtin___sprintf_chk(etc...)
+  //     will always overflow destination buffer [-Werror]
+  //
+  class HolderTraits {
+    template <typename U>
+    struct SelectImpl {
+      using type = U;
+      static pointer AsValue(type* p) { return p; }
+    };
+
+    // Partial specialization for elements of array type.
+    template <typename U, size_t N>
+    struct SelectImpl<U[N]> {
+      struct Holder { U v[N]; };
+      using type = Holder;
+      static pointer AsValue(type* p) { return &p->v; }
+    };
+    using Impl = SelectImpl<value_type>;
+
+   public:
+    using type = typename Impl::type;
+
+    static pointer AsValue(type* p) { return Impl::AsValue(p); }
+
+    // TODO(billydonahue): fix the type aliasing violation
+    // this assertion hints at.
+    static_assert(sizeof(type) == sizeof(value_type),
+                  "Holder must be same size as value_type");
+  };
+
+  using Holder = typename HolderTraits::type;
+  static pointer AsValue(Holder* p) { return HolderTraits::AsValue(p); }
+
+  // InlineSpace
+  //
+  // Allocate some space, not an array of elements of type T, so that we can
+  // skip calling the T constructors and destructors for space we never use.
+  // How many elements should we store inline?
+  //   a. If not specified, use a default of kInlineBytesDefault bytes (This is
+  //      currently 256 bytes, which seems small enough to not cause stack
+  //      overflow or unnecessary stack pollution, while still allowing stack
+  //      allocation for reasonably long character arrays).
+  //   b. Never use 0 length arrays (not ISO C++)
+  //
+  template <size_type N, typename = void>
+  class InlineSpace {
+   public:
+    Holder* data() { return reinterpret_cast<Holder*>(space_.data()); }
+    void AnnotateConstruct(size_t n) const { Annotate(n, true); }
+    void AnnotateDestruct(size_t n) const { Annotate(n, false); }
+
+   private:
+#ifndef ADDRESS_SANITIZER
+    void Annotate(size_t, bool) const { }
+#else
+    void Annotate(size_t n, bool creating) const {
+      if (!n) return;
+      const void* bot = &left_redzone_;
+      const void* beg = space_.data();
+      const void* end = space_.data() + n;
+      const void* top = &right_redzone_ + 1;
+      // args: (beg, end, old_mid, new_mid)
+      if (creating) {
+        ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, top, end);
+        ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, beg, bot);
+      } else {
+        ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, end, top);
+        ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, bot, beg);
+      }
+    }
+#endif  // ADDRESS_SANITIZER
+
+    using Buffer =
+        typename std::aligned_storage<sizeof(Holder), alignof(Holder)>::type;
+
+    ADDRESS_SANITIZER_REDZONE(left_redzone_);
+    std::array<Buffer, N> space_;
+    ADDRESS_SANITIZER_REDZONE(right_redzone_);
+  };
+
+  // Specialization when N = 0.
+  template <typename U>
+  class InlineSpace<0, U> {
+   public:
+    Holder* data() { return nullptr; }
+    void AnnotateConstruct(size_t) const {}
+    void AnnotateDestruct(size_t) const {}
+  };
+
+  // Rep
+  //
+  // A const Rep object holds FixedArray's size and data pointer.
+  //
+  class Rep : public InlineSpace<inline_elements> {
+   public:
+    Rep(size_type n, const value_type& val) : n_(n), p_(MakeHolder(n)) {
+      std::uninitialized_fill_n(p_, n, val);
+    }
+
+    explicit Rep(size_type n) : n_(n), p_(MakeHolder(n)) {
+      // Loop optimizes to nothing for trivially constructible T.
+      for (Holder* p = p_; p != p_ + n; ++p)
+        // Note: no parens: default init only.
+        // Also note '::' to avoid Holder class placement new operator.
+        ::new (static_cast<void*>(p)) Holder;
+    }
+
+    template <typename Iter>
+    Rep(Iter first, Iter last)
+        : n_(std::distance(first, last)), p_(MakeHolder(n_)) {
+      std::uninitialized_copy(first, last, AsValue(p_));
+    }
+
+    ~Rep() {
+      // Destruction must be in reverse order.
+      // Loop optimizes to nothing for trivially destructible T.
+      for (Holder* p = end(); p != begin();) (--p)->~Holder();
+      if (IsAllocated(size())) {
+        ::operator delete[](begin());
+      } else {
+        this->AnnotateDestruct(size());
+      }
+    }
+    Holder* begin() const { return p_; }
+    Holder* end() const { return p_ + n_; }
+    size_type size() const { return n_; }
+
+   private:
+    Holder* MakeHolder(size_type n) {
+      if (IsAllocated(n)) {
+        return Allocate(n);
+      } else {
+        this->AnnotateConstruct(n);
+        return this->data();
+      }
+    }
+
+    Holder* Allocate(size_type n) {
+      return static_cast<Holder*>(::operator new[](n * sizeof(Holder)));
+    }
+
+    bool IsAllocated(size_type n) const { return n > inline_elements; }
+
+    const size_type n_;
+    Holder* const p_;
+  };
+
+  // Data members
+  Rep rep_;
+};
+
+template <typename T, size_t N>
+constexpr size_t FixedArray<T, N>::inline_elements;
+
+template <typename T, size_t N>
+constexpr size_t FixedArray<T, N>::kInlineBytesDefault;
+
+}  // namespace absl
+#endif  // ABSL_CONTAINER_FIXED_ARRAY_H_
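A minimal usage sketch of the interface defined above, assuming only the public API shown. With the default `inline_elements`, a `FixedArray<double>` stores up to 256 / sizeof(double) == 32 elements without touching the heap:

#include "absl/container/fixed_array.h"

double AverageOfOnes(int n) {
  absl::FixedArray<double> weights(n, 1.0);  // inline storage when n <= 32
  double total = 0.0;
  for (double w : weights) total += w;  // begin()/end() work as usual
  return weights.empty() ? 0.0 : total / weights.size();
}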
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
new file mode 100644
index 000000000..9e88eab0c
--- /dev/null
+++ b/absl/container/fixed_array_test.cc
@@ -0,0 +1,621 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/fixed_array.h"
+
+#include <stdio.h>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/memory/memory.h"
+
+namespace {
+
+// Helper routine to determine if an absl::FixedArray used stack allocation.
+template <typename ArrayType>
+static bool IsOnStack(const ArrayType& a) {
+  return a.size() <= ArrayType::inline_elements;
+}
+
+class ConstructionTester {
+ public:
+  ConstructionTester()
+      : self_ptr_(this),
+        value_(0) {
+    constructions++;
+  }
+  ~ConstructionTester() {
+    assert(self_ptr_ == this);
+    self_ptr_ = nullptr;
+    destructions++;
+  }
+
+  // These are incremented as elements are constructed and destructed so we
+  // can be sure all elements are properly cleaned up.
+  static int constructions;
+  static int destructions;
+
+  void CheckConstructed() {
+    assert(self_ptr_ == this);
+  }
+
+  void set(int value) { value_ = value; }
+  int get() { return value_; }
+
+ private:
+  // self_ptr_ should always point to 'this' -- that's how we can be sure the
+  // constructor has been called.
+  ConstructionTester* self_ptr_;
+  int value_;
+};
+
+int ConstructionTester::constructions = 0;
+int ConstructionTester::destructions = 0;
+
+// ThreeInts will initialize its three ints to the value stored in
+// ThreeInts::counter. The constructor increments counter so that each object
+// in an array of ThreeInts will have different values.
+class ThreeInts {
+ public:
+  ThreeInts() {
+    x_ = counter;
+    y_ = counter;
+    z_ = counter;
+    ++counter;
+  }
+
+  static int counter;
+
+  int x_, y_, z_;
+};
+
+int ThreeInts::counter = 0;
+
+TEST(FixedArrayTest, SmallObjects) {
+  // Small object arrays
+  {
+    // Short arrays should be on the stack
+    absl::FixedArray<int> array(4);
+    EXPECT_TRUE(IsOnStack(array));
+  }
+
+  {
+    // Large arrays should be on the heap
+    absl::FixedArray<int> array(1048576);
+    EXPECT_FALSE(IsOnStack(array));
+  }
+
+  {
+    // Arrays of <= default size should be on the stack
+    absl::FixedArray<int, 100> array(100);
+    EXPECT_TRUE(IsOnStack(array));
+  }
+
+  {
+    // Arrays of > default size should be on the heap
+    absl::FixedArray<int, 100> array(101);
+    EXPECT_FALSE(IsOnStack(array));
+  }
+
+  {
+    // Arrays with different size elements should use approximately
+    // same amount of stack space
+    absl::FixedArray<int> array1(0);
+    absl::FixedArray<char> array2(0);
+    EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
+    EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
+  }
+
+  {
+    // Ensure that vectors are properly constructed inside a fixed array.
+    absl::FixedArray<std::vector<int> > array(2);
+    EXPECT_EQ(0, array[0].size());
+    EXPECT_EQ(0, array[1].size());
+  }
+
+  {
+    // Regardless of absl::FixedArray implementation, check that a type with a
+    // low alignment requirement and a non power-of-two size is initialized
+    // correctly.
+    ThreeInts::counter = 1;
+    absl::FixedArray<ThreeInts> array(2);
+    EXPECT_EQ(1, array[0].x_);
+    EXPECT_EQ(1, array[0].y_);
+    EXPECT_EQ(1, array[0].z_);
+    EXPECT_EQ(2, array[1].x_);
+    EXPECT_EQ(2, array[1].y_);
+    EXPECT_EQ(2, array[1].z_);
+  }
+}
+
+TEST(FixedArrayTest, AtThrows) {
+  absl::FixedArray<int> a = {1, 2, 3};
+  EXPECT_EQ(a.at(2), 3);
+  ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
+                                 "failed bounds check");
+}
+
+TEST(FixedArrayRelationalsTest, EqualArrays) {
+  for (int i = 0; i < 10; ++i) {
+    absl::FixedArray<int> a1(i);
+    std::iota(a1.begin(), a1.end(), 0);
+    absl::FixedArray<int> a2(a1.begin(), a1.end());
+
+    EXPECT_TRUE(a1 == a2);
+    EXPECT_FALSE(a1 != a2);
+    EXPECT_TRUE(a2 == a1);
+    EXPECT_FALSE(a2 != a1);
+    EXPECT_FALSE(a1 < a2);
+    EXPECT_FALSE(a1 > a2);
+    EXPECT_FALSE(a2 < a1);
+    EXPECT_FALSE(a2 > a1);
+    EXPECT_TRUE(a1 <= a2);
+    EXPECT_TRUE(a1 >= a2);
+    EXPECT_TRUE(a2 <= a1);
+    EXPECT_TRUE(a2 >= a1);
+  }
+}
+
+TEST(FixedArrayRelationalsTest, UnequalArrays) {
+  for (int i = 1; i < 10; ++i) {
+    absl::FixedArray<int> a1(i);
+    std::iota(a1.begin(), a1.end(), 0);
+    absl::FixedArray<int> a2(a1.begin(), a1.end());
+    --a2[i / 2];
+
+    EXPECT_FALSE(a1 == a2);
+    EXPECT_TRUE(a1 != a2);
+    EXPECT_FALSE(a2 == a1);
+    EXPECT_TRUE(a2 != a1);
+    EXPECT_FALSE(a1 < a2);
+    EXPECT_TRUE(a1 > a2);
+    EXPECT_TRUE(a2 < a1);
+    EXPECT_FALSE(a2 > a1);
+    EXPECT_FALSE(a1 <= a2);
+    EXPECT_TRUE(a1 >= a2);
+    EXPECT_TRUE(a2 <= a1);
+    EXPECT_FALSE(a2 >= a1);
+  }
+}
+
+template <int stack_elements>
+static void TestArray(int n) {
+  SCOPED_TRACE(n);
+  SCOPED_TRACE(stack_elements);
+  ConstructionTester::constructions = 0;
+  ConstructionTester::destructions = 0;
+  {
+    absl::FixedArray<ConstructionTester, stack_elements> array(n);
+
+    EXPECT_THAT(array.size(), n);
+    EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
+    EXPECT_THAT(array.begin() + n, array.end());
+
+    // Check that all elements were constructed
+    for (int i = 0; i < n; i++) {
+      array[i].CheckConstructed();
+    }
+    // Check that no other elements were constructed
+    EXPECT_THAT(ConstructionTester::constructions, n);
+
+    // Test operator[]
+    for (int i = 0; i < n; i++) {
+      array[i].set(i);
+    }
+    for (int i = 0; i < n; i++) {
+      EXPECT_THAT(array[i].get(), i);
+      EXPECT_THAT(array.data()[i].get(), i);
+    }
+
+    // Test data()
+    for (int i = 0; i < n; i++) {
+      array.data()[i].set(i + 1);
+    }
+    for (int i = 0; i < n; i++) {
+      EXPECT_THAT(array[i].get(), i + 1);
+      EXPECT_THAT(array.data()[i].get(), i + 1);
+    }
+  }  // Close scope containing 'array'.
+
+  // Check that all constructed elements were destructed.
+  EXPECT_EQ(ConstructionTester::constructions,
+            ConstructionTester::destructions);
+}
+
+template <int elements_per_inner_array, int inline_elements>
+static void TestArrayOfArrays(int n) {
+  SCOPED_TRACE(n);
+  SCOPED_TRACE(inline_elements);
+  SCOPED_TRACE(elements_per_inner_array);
+  ConstructionTester::constructions = 0;
+  ConstructionTester::destructions = 0;
+  {
+    using InnerArray = ConstructionTester[elements_per_inner_array];
+    // Heap-allocate the FixedArray to avoid blowing the stack frame.
+    auto array_ptr =
+        absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
+    auto& array = *array_ptr;
+
+    ASSERT_EQ(array.size(), n);
+    ASSERT_EQ(array.memsize(),
+              sizeof(ConstructionTester) * elements_per_inner_array * n);
+    ASSERT_EQ(array.begin() + n, array.end());
+
+    // Check that all elements were constructed
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array[i])[j].CheckConstructed();
+      }
+    }
+    // Check that no other elements were constructed
+    ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
+
+    // Test operator[]
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array[i])[j].set(i * elements_per_inner_array + j);
+      }
+    }
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
+        ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
+      }
+    }
+
+    // Test data()
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
+      }
+    }
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < elements_per_inner_array; j++) {
+        ASSERT_EQ((array[i])[j].get(),
+                  (i + 1) * elements_per_inner_array + j);
+        ASSERT_EQ((array.data()[i])[j].get(),
+                  (i + 1) * elements_per_inner_array + j);
+      }
+    }
+  }  // Close scope containing 'array'.
+
+  // Check that all constructed elements were destructed.
+  EXPECT_EQ(ConstructionTester::constructions,
+            ConstructionTester::destructions);
+}
+
+TEST(IteratorConstructorTest, NonInline) {
+  int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
+      kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, Inline) {
+  int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
+      kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, NonPod) {
+  char const* kInput[] =
+      { "red", "orange", "yellow", "green", "blue", "indigo", "violet" };
+  absl::FixedArray<std::string> const fixed(kInput,
+                                            kInput + ABSL_ARRAYSIZE(kInput));
+  ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+  for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+    ASSERT_EQ(kInput[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, FromEmptyVector) {
+  std::vector<int> const empty;
+  absl::FixedArray<int> const fixed(empty.begin(), empty.end());
+  EXPECT_EQ(0, fixed.size());
+  EXPECT_EQ(empty.size(), fixed.size());
+}
+
+TEST(IteratorConstructorTest, FromNonEmptyVector) {
+  int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  absl::FixedArray<int> const fixed(items.begin(), items.end());
+  ASSERT_EQ(items.size(), fixed.size());
+  for (size_t i = 0; i < items.size(); ++i) {
+    ASSERT_EQ(items[i], fixed[i]);
+  }
+}
+
+TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
+  int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+  absl::FixedArray<int> const fixed(items.begin(), items.end());
+  EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
+}
+
+TEST(InitListConstructorTest, InitListConstruction) {
+  absl::FixedArray<int> fixed = {1, 2, 3};
+  EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
+}
+
+TEST(FillConstructorTest, NonEmptyArrays) {
+  absl::FixedArray<int> stack_array(4, 1);
+  EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
+
+  absl::FixedArray<int, 0> heap_array(4, 1);
+  EXPECT_THAT(heap_array, testing::ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(FillConstructorTest, EmptyArray) {
+  absl::FixedArray<int> empty_fill(0, 1);
+  absl::FixedArray<int> empty_size(0);
+  EXPECT_EQ(empty_fill, empty_size);
+}
+
+TEST(FillConstructorTest, NotTriviallyCopyable) {
+  std::string str = "abcd";
+  absl::FixedArray<std::string> strings = {str, str, str, str};
+
+  absl::FixedArray<std::string> array(4, str);
+  EXPECT_EQ(array, strings);
+}
+
+TEST(FillConstructorTest, Disambiguation) {
+  absl::FixedArray<size_t> a(1, 2);
+  EXPECT_THAT(a, testing::ElementsAre(2));
+}
+
+TEST(FixedArrayTest, ManySizedArrays) {
+  std::vector<int> sizes;
+  for (int i = 1; i < 100; i++) sizes.push_back(i);
+  for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
+  for (int n : sizes) {
+    TestArray<0>(n);
+    TestArray<1>(n);
+    TestArray<64>(n);
+    TestArray<1000>(n);
+  }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
+  for (int n = 1; n < 1000; n++) {
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
+  }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
+  for (int n = 1; n < 1000; n++) {
+    TestArrayOfArrays<2, 0>(n);
+    TestArrayOfArrays<2, 1>(n);
+    TestArrayOfArrays<2, 64>(n);
+    TestArrayOfArrays<2, 1000>(n);
+  }
+}
+
+// If value_type is put inside of a struct container, we might evoke this
+// error in a hardened build unless data() is carefully written, so check on
+// that:
+//   error: call to int __builtin___sprintf_chk(etc...)
+//   will always overflow destination buffer [-Werror]
+TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
+  absl::FixedArray<char, 32> buf(32);
+  sprintf(buf.data(), "foo");  // NOLINT(runtime/printf)
+}
+
+TEST(FixedArrayTest, TooBigInlinedSpace) {
+  struct TooBig {
+    char c[1 << 20];
+  };  // too big for even one on the stack
+
+  // Simulate the data members of absl::FixedArray, a pointer and a size_t.
+  struct Data {
+    TooBig* p;
+    size_t size;
+  };
+
+  // Make sure TooBig objects are not inlined for 0 or default size.
+  static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
+                "0-sized absl::FixedArray should have same size as Data.");
+  static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
+                "0-sized absl::FixedArray should have same alignment as Data.");
+  static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
+                "default-sized absl::FixedArray should have same size as Data");
+  static_assert(
+      alignof(absl::FixedArray<TooBig>) == alignof(Data),
+      "default-sized absl::FixedArray should have same alignment as Data.");
+}
+
+// PickyDelete EXPECTs its class-scope deallocation funcs are unused.
+struct PickyDelete {
+  PickyDelete() {}
+  ~PickyDelete() {}
+  void operator delete(void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;
+    ::operator delete(p);
+  }
+  void operator delete[](void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;
+    ::operator delete[](p);
+  }
+};
+
+TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
+
+TEST(FixedArrayTest, Data) {
+  static const int kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
+  EXPECT_EQ(fa.data(), &*fa.begin());
+  EXPECT_EQ(fa.data(), &fa[0]);
+
+  const absl::FixedArray<int>& cfa = fa;
+  EXPECT_EQ(cfa.data(), &*cfa.begin());
+  EXPECT_EQ(cfa.data(), &cfa[0]);
+}
+
+TEST(FixedArrayTest, Empty) {
+  absl::FixedArray<int> empty(0);
+  absl::FixedArray<int> inline_filled(1);
+  absl::FixedArray<int, 0> heap_filled(1);
+  EXPECT_TRUE(empty.empty());
+  EXPECT_FALSE(inline_filled.empty());
+  EXPECT_FALSE(heap_filled.empty());
+}
+
+TEST(FixedArrayTest, FrontAndBack) {
+  absl::FixedArray<int, 3> inlined = {1, 2, 3};
+  EXPECT_EQ(inlined.front(), 1);
+  EXPECT_EQ(inlined.back(), 3);
+
+  absl::FixedArray<int, 0> allocated = {1, 2, 3};
+  EXPECT_EQ(allocated.front(), 1);
+  EXPECT_EQ(allocated.back(), 3);
+
+  absl::FixedArray<int> one_element = {1};
+  EXPECT_EQ(one_element.front(), one_element.back());
+}
+
+TEST(FixedArrayTest, ReverseIteratorInlined) {
+  absl::FixedArray<int, 5> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int, 5>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int, 5>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, ReverseIteratorAllocated) {
+  absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int, 0>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int, 0>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, Fill) {
+  absl::FixedArray<int, 5> inlined(5);
+  int fill_val = 42;
+  inlined.fill(fill_val);
+  for (int i : inlined) EXPECT_EQ(i, fill_val);
+
+  absl::FixedArray<int, 0> allocated(5);
+  allocated.fill(fill_val);
+  for (int i : allocated) EXPECT_EQ(i, fill_val);
+
+  // It doesn't do anything; just make sure this compiles.
+  absl::FixedArray<int> empty(0);
+  empty.fill(fill_val);
+}
+
+#ifdef ADDRESS_SANITIZER
+TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
+  absl::FixedArray<int, 32> a(10);
+  int *raw = a.data();
+  raw[0] = 0;
+  raw[9] = 0;
+  EXPECT_DEATH(raw[-2] = 0, "container-overflow");
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[10] = 0, "container-overflow");
+  EXPECT_DEATH(raw[31] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
+  absl::FixedArray<char, 17> a(12);
+  char *raw = a.data();
+  raw[0] = 0;
+  raw[11] = 0;
+  EXPECT_DEATH(raw[-7] = 0, "container-overflow");
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[12] = 0, "container-overflow");
+  EXPECT_DEATH(raw[17] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
+  absl::FixedArray<uint64_t, 20> a(20);
+  uint64_t *raw = a.data();
+  raw[0] = 0;
+  raw[19] = 0;
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[20] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
+  absl::FixedArray<ThreeInts> a(10);
+  ThreeInts *raw = a.data();
+  raw[0] = ThreeInts();
+  raw[9] = ThreeInts();
+  // Note: raw[-1] is pointing to 12 bytes before the container range. However,
+  // there is only an 8-byte red zone before the container range, so we only
+  // access the last 4 bytes of the struct to make sure it stays within the red
+  // zone.
+  EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
+  EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
+  // The actual size of storage is kDefaultBytes=256, 21*12 = 252,
+  // so reading raw[21] should still trigger the correct warning.
+  EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
+}
+#endif  // ADDRESS_SANITIZER
+
+}  // namespace
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
new file mode 100644
index 000000000..f060f5c5c
--- /dev/null
+++ b/absl/container/inlined_vector.h
@@ -0,0 +1,1330 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: inlined_vector.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains the declaration and definition of an "inlined
+// vector" which behaves in an equivalent fashion to a `std::vector`, except
+// that storage for small sequences of the vector is provided inline without
+// requiring any heap allocation.
+
+// An `absl::InlinedVector<T, N>` specifies the size N at which to inline as
+// one of its template parameters. Vectors of length <= N are provided inline.
+// Typically N is very small (e.g., 4) so that sequences that are expected to
+// be short do not require allocations.
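To make the preceding paragraph concrete, a small sketch of the inline-then-heap behavior, assuming the usual `std::vector`-style operations the class goes on to define:

#include "absl/container/inlined_vector.h"

void Example() {
  absl::InlinedVector<int, 4> v = {1, 2, 3};  // fits inline: no heap allocation
  v.push_back(4);  // size() == 4 <= N: still inline
  v.push_back(5);  // exceeds N == 4: storage moves to the heap
}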
+
+// An `absl::InlinedVector` does not usually require a specific allocator; if
+// the inlined vector grows beyond its initial constraints, it will need to
+// allocate (as any normal `std::vector` would) and it will generally use the
+// default allocator in that case; optionally, a custom allocator may be
+// specified using an `absl::InlinedVector` constructor.
+
+#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+
+// -----------------------------------------------------------------------------
+// InlinedVector
+// -----------------------------------------------------------------------------
+//
+// An `absl::InlinedVector` is designed to be a drop-in replacement for
+// `std::vector` for use cases where the vector's size is sufficiently small
+// that it can be inlined. If the inlined vector does grow beyond its estimated
+// size, it will trigger an initial allocation on the heap, and will behave as a
+// `std::vector`. The API of the `absl::InlinedVector` within this file is
+// designed to cover the same API footprint as covered by `std::vector`.
+template <typename T, size_t N, typename A = std::allocator<T>>
+class InlinedVector {
+  using AllocatorTraits = std::allocator_traits<A>;
+
+ public:
+  using allocator_type = A;
+  using value_type = typename allocator_type::value_type;
+  using pointer = typename allocator_type::pointer;
+  using const_pointer = typename allocator_type::const_pointer;
+  using reference = typename allocator_type::reference;
+  using const_reference = typename allocator_type::const_reference;
+  using size_type = typename allocator_type::size_type;
+  using difference_type = typename allocator_type::difference_type;
+  using iterator = pointer;
+  using const_iterator = const_pointer;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  InlinedVector() noexcept(noexcept(allocator_type()))
+      : allocator_and_tag_(allocator_type()) {}
+
+  explicit InlinedVector(const allocator_type& alloc) noexcept
+      : allocator_and_tag_(alloc) {}
+
+  // Create a vector with n copies of value_type().
+  explicit InlinedVector(size_type n) : allocator_and_tag_(allocator_type()) {
+    InitAssign(n);
+  }
+
+  // Create a vector with n copies of elem.
+  InlinedVector(size_type n, const value_type& elem,
+                const allocator_type& alloc = allocator_type())
+      : allocator_and_tag_(alloc) {
+    InitAssign(n, elem);
+  }
+
+  // Create and initialize with the elements [first .. last).
+  // The unused enable_if argument restricts this constructor so that it is
+  // elided when value_type is an integral type. This prevents ambiguous
+  // interpretation between a call to this constructor with two integral
+  // arguments and a call to the preceding (n, elem) constructor.
+  template <typename InputIterator>
+  InlinedVector(
+      InputIterator first, InputIterator last,
+      const allocator_type& alloc = allocator_type(),
+      typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
+          nullptr)
+      : allocator_and_tag_(alloc) {
+    AppendRange(first, last);
+  }
+
+  InlinedVector(std::initializer_list<value_type> init,
+                const allocator_type& alloc = allocator_type())
+      : allocator_and_tag_(alloc) {
+    AppendRange(init.begin(), init.end());
+  }
+
+  InlinedVector(const InlinedVector& v);
+  InlinedVector(const InlinedVector& v, const allocator_type& alloc);
+
+  InlinedVector(InlinedVector&& v) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value ||
+      std::is_nothrow_move_constructible<value_type>::value);
+  InlinedVector(InlinedVector&& v, const allocator_type& alloc) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value);
+
+  ~InlinedVector() { clear(); }
+
+  InlinedVector& operator=(const InlinedVector& v) {
+    // Optimized to avoid reallocation.
+    // Prefer reassignment to copy construction for elements.
+    if (size() < v.size()) {  // grow
+      reserve(v.size());
+      std::copy(v.begin(), v.begin() + size(), begin());
+      std::copy(v.begin() + size(), v.end(), std::back_inserter(*this));
+    } else {  // maybe shrink
+      erase(begin() + v.size(), end());
+      std::copy(v.begin(), v.end(), begin());
+    }
+    return *this;
+  }
+
+  InlinedVector& operator=(InlinedVector&& v) {
+    if (this == &v) {
+      return *this;
+    }
+    if (v.allocated()) {
+      clear();
+      tag().set_allocated_size(v.size());
+      init_allocation(v.allocation());
+      v.tag() = Tag();
+    } else {
+      if (allocated()) clear();
+      // Both are inlined now.
+      if (size() < v.size()) {
+        auto mid = std::make_move_iterator(v.begin() + size());
+        std::copy(std::make_move_iterator(v.begin()), mid, begin());
+        UninitializedCopy(mid, std::make_move_iterator(v.end()), end());
+      } else {
+        auto new_end = std::copy(std::make_move_iterator(v.begin()),
+                                 std::make_move_iterator(v.end()), begin());
+        Destroy(new_end, end());
+      }
+      tag().set_inline_size(v.size());
+    }
+    return *this;
+  }
+
+  InlinedVector& operator=(std::initializer_list<value_type> init) {
+    AssignRange(init.begin(), init.end());
+    return *this;
+  }
+
+  // InlinedVector::assign()
+  //
+  // Replaces the contents of the inlined vector with copies of those in the
+  // iterator range [first, last).
+  template <typename InputIterator>
+  void assign(
+      InputIterator first, InputIterator last,
+      typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
+          nullptr) {
+    AssignRange(first, last);
+  }
+
+  // Overload of `InlinedVector::assign()` to take values from elements of an
+  // initializer list.
+  void assign(std::initializer_list<value_type> init) {
+    AssignRange(init.begin(), init.end());
+  }
+
+  // Overload of `InlinedVector::assign()` to replace the first `n` elements of
+  // the inlined vector with `elem` values.
+  void assign(size_type n, const value_type& elem) {
+    if (n <= size()) {  // Possibly shrink
+      std::fill_n(begin(), n, elem);
+      erase(begin() + n, end());
+      return;
+    }
+    // Grow
+    reserve(n);
+    std::fill_n(begin(), size(), elem);
+    if (allocated()) {
+      UninitializedFill(allocated_space() + size(), allocated_space() + n,
+                        elem);
+      tag().set_allocated_size(n);
+    } else {
+      UninitializedFill(inlined_space() + size(), inlined_space() + n, elem);
+      tag().set_inline_size(n);
+    }
+  }
+
+  // InlinedVector::size()
+  //
+  // Returns the number of elements in the inlined vector.
+  size_type size() const noexcept { return tag().size(); }
+
+  // InlinedVector::empty()
+  //
+  // Checks if the inlined vector has no elements.
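+  //
+  // Illustrative example (added commentary; not part of the original patch):
+  //
+  //   absl::InlinedVector<int, 8> v;
+  //   assert(v.empty());     // no elements yet, despite inline capacity of 8
+  //   v.assign(3, 7);        // v == {7, 7, 7}
+  //   assert(!v.empty() && v.size() == 3);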
+  bool empty() const noexcept { return (size() == 0); }
+
+  // InlinedVector::capacity()
+  //
+  // Returns the number of elements that can be stored in an inlined vector
+  // without requiring a reallocation of underlying memory. Note that for
+  // most inlined vectors, `capacity()` should equal its initial size `N`;
+  // inlined vectors that grow beyond this capacity are no longer inlined,
+  // and `capacity()` then reflects the capacity of the heap allocation.
+  size_type capacity() const noexcept {
+    return allocated() ? allocation().capacity() : N;
+  }
+
+  // InlinedVector::max_size()
+  //
+  // Returns the maximum number of elements the vector can hold.
+  size_type max_size() const noexcept {
+    // One bit of the size storage is used to indicate whether the inlined
+    // vector is allocated; as a result, the maximum size of the container that
+    // we can express is half of the max for our size type.
+    return std::numeric_limits<size_type>::max() / 2;
+  }
+
+  // InlinedVector::data()
+  //
+  // Returns a const T* pointer to elements of the inlined vector. This pointer
+  // can be used to access (but not modify) the contained elements.
+  // Only results within the range `[0, size())` are defined.
+  const_pointer data() const noexcept {
+    return allocated() ? allocated_space() : inlined_space();
+  }
+
+  // Overload of InlinedVector::data() to return a T* pointer to elements of
+  // the inlined vector. This pointer can be used to access and modify the
+  // contained elements.
+  pointer data() noexcept {
+    return allocated() ? allocated_space() : inlined_space();
+  }
+
+  // InlinedVector::clear()
+  //
+  // Removes all elements from the inlined vector.
+  void clear() noexcept {
+    size_type s = size();
+    if (allocated()) {
+      Destroy(allocated_space(), allocated_space() + s);
+      allocation().Dealloc(allocator());
+    } else if (s != 0) {  // do nothing for empty vectors
+      Destroy(inlined_space(), inlined_space() + s);
+    }
+    tag() = Tag();
+  }
+
+  // InlinedVector::at()
+  //
+  // Returns the ith element of an inlined vector.
+  const value_type& at(size_type i) const {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange(
+          "InlinedVector::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // InlinedVector::operator[]
+  //
+  // Returns the ith element of an inlined vector using the array operator.
+  const value_type& operator[](size_type i) const {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // Overload of InlinedVector::at() to return the ith element of an inlined
+  // vector.
+  value_type& at(size_type i) {
+    if (i >= size()) {
+      base_internal::ThrowStdOutOfRange(
+          "InlinedVector::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // Overload of InlinedVector::operator[] to return the ith element of an
+  // inlined vector.
+  value_type& operator[](size_type i) {
+    assert(i < size());
+    return data()[i];
+  }
+
+  // InlinedVector::back()
+  //
+  // Returns a reference to the last element of an inlined vector.
+  value_type& back() {
+    assert(!empty());
+    return at(size() - 1);
+  }
+
+  // Overload of InlinedVector::back() to return a reference to the last
+  // element of an inlined vector of const values.
+  const value_type& back() const {
+    assert(!empty());
+    return at(size() - 1);
+  }
+
+  // InlinedVector::front()
+  //
+  // Returns a reference to the first element of an inlined vector.
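+  //
+  // A short sketch of the element accessors above (added for illustration;
+  // not part of the original patch):
+  //
+  //   absl::InlinedVector<int, 4> v = {10, 20, 30};
+  //   v.front() = 11;   // v == {11, 20, 30}
+  //   v.at(1) = 21;     // bounds-checked: throws std::out_of_range if i >= size()
+  //   v[2] = 31;        // unchecked: asserts in debug builds only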
+  value_type& front() {
+    assert(!empty());
+    return at(0);
+  }
+
+  // Overload of InlinedVector::front() to return a reference to the first
+  // element of an inlined vector of const values.
+  const value_type& front() const {
+    assert(!empty());
+    return at(0);
+  }
+
+  // InlinedVector::emplace_back()
+  //
+  // Constructs and appends an object to the inlined vector.
+  template <typename... Args>
+  void emplace_back(Args&&... args) {
+    size_type s = size();
+    assert(s <= capacity());
+    if (ABSL_PREDICT_FALSE(s == capacity())) {
+      GrowAndEmplaceBack(std::forward<Args>(args)...);
+      return;
+    }
+    assert(s < capacity());
+
+    value_type* space;
+    if (allocated()) {
+      tag().set_allocated_size(s + 1);
+      space = allocated_space();
+    } else {
+      tag().set_inline_size(s + 1);
+      space = inlined_space();
+    }
+    Construct(space + s, std::forward<Args>(args)...);
+  }
+
+  // InlinedVector::push_back()
+  //
+  // Appends a const element to the inlined vector.
+  void push_back(const value_type& t) { emplace_back(t); }
+
+  // Overload of InlinedVector::push_back() to append a move-only element to
+  // the inlined vector.
+  void push_back(value_type&& t) { emplace_back(std::move(t)); }
+
+  // InlinedVector::pop_back()
+  //
+  // Removes the last element (which is destroyed) in the inlined vector.
+  void pop_back() {
+    assert(!empty());
+    size_type s = size();
+    if (allocated()) {
+      Destroy(allocated_space() + s - 1, allocated_space() + s);
+      tag().set_allocated_size(s - 1);
+    } else {
+      Destroy(inlined_space() + s - 1, inlined_space() + s);
+      tag().set_inline_size(s - 1);
+    }
+  }
+
+  // InlinedVector::resize()
+  //
+  // Resizes the inlined vector to contain `n` elements. If `n` is smaller than
+  // the inlined vector's current size, extra elements are destroyed. If `n` is
+  // larger than the initial size, new elements are value-initialized.
+  void resize(size_type n);
+
+  // Overload of InlinedVector::resize() to resize the inlined vector to
+  // contain `n` elements. If `n` is larger than the current size, enough
+  // copies of `elem` are appended to increase its size to `n`.
+  void resize(size_type n, const value_type& elem);
+
+  // InlinedVector::begin()
+  //
+  // Returns an iterator to the beginning of the inlined vector.
+  iterator begin() noexcept { return data(); }
+
+  // Overload of InlinedVector::begin() for returning a const iterator to the
+  // beginning of the inlined vector.
+  const_iterator begin() const noexcept { return data(); }
+
+  // InlinedVector::cbegin()
+  //
+  // Returns a const iterator to the beginning of the inlined vector.
+  const_iterator cbegin() const noexcept { return begin(); }
+
+  // InlinedVector::end()
+  //
+  // Returns an iterator to the end of the inlined vector.
+  iterator end() noexcept { return data() + size(); }
+
+  // Overload of InlinedVector::end() for returning a const iterator to the end
+  // of the inlined vector.
+  const_iterator end() const noexcept { return data() + size(); }
+
+  // InlinedVector::cend()
+  //
+  // Returns a const iterator to the end of the inlined vector.
+  const_iterator cend() const noexcept { return end(); }
+
+  // InlinedVector::rbegin()
+  //
+  // Returns a reverse iterator from the end of the inlined vector.
+  reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+
+  // Overload of InlinedVector::rbegin() for returning a const reverse iterator
+  // from the end of the inlined vector.
+  const_reverse_iterator rbegin() const noexcept {
+    return const_reverse_iterator(end());
+  }
+
+  // InlinedVector::crbegin()
+  //
+  // Returns a const reverse iterator from the end of the inlined vector.
+  const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+  // InlinedVector::rend()
+  //
+  // Returns a reverse iterator from the beginning of the inlined vector.
+  reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+
+  // Overload of InlinedVector::rend() for returning a const reverse iterator
+  // from the beginning of the inlined vector.
+  const_reverse_iterator rend() const noexcept {
+    return const_reverse_iterator(begin());
+  }
+
+  // InlinedVector::crend()
+  //
+  // Returns a const reverse iterator from the beginning of the inlined vector.
+  const_reverse_iterator crend() const noexcept { return rend(); }
+
+  // InlinedVector::emplace()
+  //
+  // Constructs and inserts an object to the inlined vector at the given
+  // `position`, returning an iterator pointing to the newly emplaced element.
+  template <typename... Args>
+  iterator emplace(const_iterator position, Args&&... args);
+
+  // InlinedVector::insert()
+  //
+  // Inserts an element of the specified value at `position`, returning an
+  // iterator pointing to the newly inserted element.
+  iterator insert(const_iterator position, const value_type& v) {
+    return emplace(position, v);
+  }
+
+  // Overload of InlinedVector::insert() for inserting an element of the
+  // specified rvalue, returning an iterator pointing to the newly inserted
+  // element.
+  iterator insert(const_iterator position, value_type&& v) {
+    return emplace(position, std::move(v));
+  }
+
+  // Overload of InlinedVector::insert() for inserting `n` elements of the
+  // specified value at `position`, returning an iterator pointing to the first
+  // of the newly inserted elements.
+  iterator insert(const_iterator position, size_type n, const value_type& v) {
+    return InsertWithCount(position, n, v);
+  }
+
+  // Overload of `InlinedVector::insert()` to disambiguate the two
+  // three-argument overloads of `insert()`, returning an iterator pointing to
+  // the first of the newly inserted elements.
+  template <typename InputIterator,
+            typename = typename std::enable_if<std::is_convertible<
+                typename std::iterator_traits<InputIterator>::iterator_category,
+                std::input_iterator_tag>::value>::type>
+  iterator insert(const_iterator position, InputIterator first,
+                  InputIterator last) {
+    using IterType =
+        typename std::iterator_traits<InputIterator>::iterator_category;
+    return InsertWithRange(position, first, last, IterType());
+  }
+
+  // Overload of InlinedVector::insert() for inserting a list of elements at
+  // `position`, returning an iterator pointing to the first of the newly
+  // inserted elements.
+  iterator insert(const_iterator position,
+                  std::initializer_list<value_type> init) {
+    return insert(position, init.begin(), init.end());
+  }
+
+  // InlinedVector::erase()
+  //
+  // Erases the element at `position` of the inlined vector, returning an
+  // iterator pointing to the following element or the container's end if the
+  // last element was erased.
+  iterator erase(const_iterator position) {
+    assert(position >= begin());
+    assert(position < end());
+
+    iterator pos = const_cast<iterator>(position);
+    std::move(pos + 1, end(), pos);
+    pop_back();
+    return pos;
+  }
+
+  // Overload of InlinedVector::erase() for erasing all elements in the
+  // iterator range [first, last) in the inlined vector, returning an iterator
+  // pointing to the first element following the range erased, or the
+  // container's end if the range included the container's last element.
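+  //
+  // For example (illustrative addition, not in the original patch):
+  //
+  //   absl::InlinedVector<int, 8> v = {0, 1, 2, 3, 4};
+  //   auto it = v.erase(v.begin() + 1, v.begin() + 3);  // v == {0, 3, 4}
+  //   assert(*it == 3);  // first element after the erased range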
+ iterator erase(const_iterator first, const_iterator last); + + // InlinedVector::reserve() + // + // Enlarges the underlying representation of the inlined vector so it can hold + // at least `n` elements. This method does not change `size()` or the actual + // contents of the vector. + // + // Note that if `n` does not exceed the inlined vector's initial size `N`, + // `reserve()` will have no effect; if it does exceed its initial size, + // `reserve()` will trigger an initial allocation and move the inlined vector + // onto the heap. If the vector already exists on the heap and the requested + // size exceeds it, a reallocation will be performed. + void reserve(size_type n) { + if (n > capacity()) { + // Make room for new elements + EnlargeBy(n - size()); + } + } + + // InlinedVector::swap() + // + // Swaps the contents of this inlined vector with the contents of `other`. + void swap(InlinedVector& other); + + // InlinedVector::get_allocator() + // + // Returns the allocator of this inlined vector. + allocator_type get_allocator() const { return allocator(); } + + private: + static_assert(N > 0, "inlined vector with nonpositive size"); + + // It holds whether the vector is allocated or not in the lowest bit. + // The size is held in the high bits: + // size_ = (size << 1) | is_allocated; + class Tag { + public: + Tag() : size_(0) {} + size_type size() const { return size_ >> 1; } + void add_size(size_type n) { size_ += n << 1; } + void set_inline_size(size_type n) { size_ = n << 1; } + void set_allocated_size(size_type n) { size_ = (n << 1) | 1; } + bool allocated() const { return size_ & 1; } + + private: + size_type size_; + }; + + // Derives from allocator_type to use the empty base class optimization. + // If the allocator_type is stateless, we can 'store' + // our instance of it for free. 
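+  //
+  // (Added explanatory sketch, not part of the original patch: with the empty
+  // base optimization, a stateless base class contributes no bytes, so on
+  // mainstream ABIs the holder is no larger than its Tag member.)
+  //
+  //   struct Empty {};
+  //   struct Holder : private Empty { void* tag; };
+  //   static_assert(sizeof(Holder) == sizeof(void*),
+  //                 "empty base adds no size on this ABI");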
+ class AllocatorAndTag : private allocator_type { + public: + explicit AllocatorAndTag(const allocator_type& a, Tag t = Tag()) + : allocator_type(a), tag_(t) { + } + Tag& tag() { return tag_; } + const Tag& tag() const { return tag_; } + allocator_type& allocator() { return *this; } + const allocator_type& allocator() const { return *this; } + private: + Tag tag_; + }; + + class Allocation { + public: + Allocation(allocator_type& a, // NOLINT(runtime/references) + size_type capacity) + : capacity_(capacity), + buffer_(AllocatorTraits::allocate(a, capacity_)) {} + + void Dealloc(allocator_type& a) { // NOLINT(runtime/references) + AllocatorTraits::deallocate(a, buffer(), capacity()); + } + + size_type capacity() const { return capacity_; } + const value_type* buffer() const { return buffer_; } + value_type* buffer() { return buffer_; } + + private: + size_type capacity_; + value_type* buffer_; + }; + + const Tag& tag() const { return allocator_and_tag_.tag(); } + Tag& tag() { return allocator_and_tag_.tag(); } + + Allocation& allocation() { + return reinterpret_cast(rep_.allocation_storage.allocation); + } + const Allocation& allocation() const { + return reinterpret_cast( + rep_.allocation_storage.allocation); + } + void init_allocation(const Allocation& allocation) { + new (&rep_.allocation_storage.allocation) Allocation(allocation); + } + + value_type* inlined_space() { + return reinterpret_cast(&rep_.inlined_storage.inlined); + } + const value_type* inlined_space() const { + return reinterpret_cast(&rep_.inlined_storage.inlined); + } + + value_type* allocated_space() { + return allocation().buffer(); + } + const value_type* allocated_space() const { + return allocation().buffer(); + } + + const allocator_type& allocator() const { + return allocator_and_tag_.allocator(); + } + allocator_type& allocator() { + return allocator_and_tag_.allocator(); + } + + bool allocated() const { return tag().allocated(); } + + // Enlarge the underlying representation so we can store size_ + delta elems. + // The size is not changed, and any newly added memory is not initialized. + void EnlargeBy(size_type delta); + + // Shift all elements from position to end() n places to the right. + // If the vector needs to be enlarged, memory will be allocated. + // Returns iterators pointing to the start of the previously-initialized + // portion and the start of the uninitialized portion of the created gap. + // The number of initialized spots is pair.second - pair.first; + // the number of raw spots is n - (pair.second - pair.first). + std::pair ShiftRight(const_iterator position, + size_type n); + + void ResetAllocation(Allocation new_allocation, size_type new_size) { + if (allocated()) { + Destroy(allocated_space(), allocated_space() + size()); + assert(begin() == allocated_space()); + allocation().Dealloc(allocator()); + allocation() = new_allocation; + } else { + Destroy(inlined_space(), inlined_space() + size()); + init_allocation(new_allocation); // bug: only init once + } + tag().set_allocated_size(new_size); + } + + template + void GrowAndEmplaceBack(Args&&... 
args) { + assert(size() == capacity()); + const size_type s = size(); + + Allocation new_allocation(allocator(), 2 * capacity()); + + Construct(new_allocation.buffer() + s, std::forward(args)...); + UninitializedCopy(std::make_move_iterator(data()), + std::make_move_iterator(data() + s), + new_allocation.buffer()); + + ResetAllocation(new_allocation, s + 1); + } + + void InitAssign(size_type n); + void InitAssign(size_type n, const value_type& t); + + template + void Construct(pointer p, Args&&... args) { + AllocatorTraits::construct(allocator(), p, std::forward(args)...); + } + + template + void UninitializedCopy(Iter src, Iter src_last, value_type* dst) { + for (; src != src_last; ++dst, ++src) Construct(dst, *src); + } + + template + void UninitializedFill(value_type* dst, value_type* dst_last, + const Args&... args) { + for (; dst != dst_last; ++dst) Construct(dst, args...); + } + + // Destroy [ptr, ptr_last) in place. + void Destroy(value_type* ptr, value_type* ptr_last); + + template + void AppendRange(Iter first, Iter last, std::input_iterator_tag) { + std::copy(first, last, std::back_inserter(*this)); + } + + // Faster path for forward iterators. + template + void AppendRange(Iter first, Iter last, std::forward_iterator_tag); + + template + void AppendRange(Iter first, Iter last) { + using IterTag = typename std::iterator_traits::iterator_category; + AppendRange(first, last, IterTag()); + } + + template + void AssignRange(Iter first, Iter last, std::input_iterator_tag); + + // Faster path for forward iterators. + template + void AssignRange(Iter first, Iter last, std::forward_iterator_tag); + + template + void AssignRange(Iter first, Iter last) { + using IterTag = typename std::iterator_traits::iterator_category; + AssignRange(first, last, IterTag()); + } + + iterator InsertWithCount(const_iterator position, size_type n, + const value_type& v); + + template + iterator InsertWithRange(const_iterator position, InputIter first, + InputIter last, std::input_iterator_tag); + template + iterator InsertWithRange(const_iterator position, ForwardIter first, + ForwardIter last, std::forward_iterator_tag); + + AllocatorAndTag allocator_and_tag_; + + // Either the inlined or allocated representation + union Rep { + // Use struct to perform indirection that solves a bizarre compilation + // error on Visual Studio (all known versions). + struct { + typename std::aligned_storage::type inlined[N]; + } inlined_storage; + struct { + typename std::aligned_storage::type allocation; + } allocation_storage; + } rep_; +}; + +// ----------------------------------------------------------------------------- +// InlinedVector Non-Member Functions +// ----------------------------------------------------------------------------- + +// swap() +// +// Swaps the contents of two inlined vectors. This convenience function +// simply calls InlinedVector::swap(other_inlined_vector). +template +void swap(InlinedVector& a, + InlinedVector& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); +} + +// operator==() +// +// Tests the equivalency of the contents of two inlined vectors. +template +bool operator==(const InlinedVector& a, + const InlinedVector& b) { + return absl::equal(a.begin(), a.end(), b.begin(), b.end()); +} + +// operator!=() +// +// Tests the inequality of the contents of two inlined vectors. 
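+//
+// For instance (an added illustration, not from the original patch), equality
+// depends only on contents, never on whether storage is inline or heap:
+//
+//   absl::InlinedVector<int, 4> a = {1, 2, 3};        // inline
+//   absl::InlinedVector<int, 4> b = {1, 2, 3, 4, 5};  // heap-allocated
+//   b.resize(3);                 // still heap-backed, contents now {1, 2, 3}
+//   assert(a == b && !(a != b));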
+template +bool operator!=(const InlinedVector& a, + const InlinedVector& b) { + return !(a == b); +} + +// operator<() +// +// Tests whether the contents of one inlined vector are less than the contents +// of another through a lexicographical comparison operation. +template +bool operator<(const InlinedVector& a, + const InlinedVector& b) { + return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); +} + +// operator>() +// +// Tests whether the contents of one inlined vector are greater than the +// contents of another through a lexicographical comparison operation. +template +bool operator>(const InlinedVector& a, + const InlinedVector& b) { + return b < a; +} + +// operator<=() +// +// Tests whether the contents of one inlined vector are less than or equal to +// the contents of another through a lexicographical comparison operation. +template +bool operator<=(const InlinedVector& a, + const InlinedVector& b) { + return !(b < a); +} + +// operator>=() +// +// Tests whether the contents of one inlined vector are greater than or equal to +// the contents of another through a lexicographical comparison operation. +template +bool operator>=(const InlinedVector& a, + const InlinedVector& b) { + return !(a < b); +} + +// ----------------------------------------------------------------------------- +// Implementation of InlinedVector +// ----------------------------------------------------------------------------- +// +// Do not depend on any implementation details below this line. + +template +InlinedVector::InlinedVector(const InlinedVector& v) + : allocator_and_tag_(v.allocator()) { + reserve(v.size()); + if (allocated()) { + UninitializedCopy(v.begin(), v.end(), allocated_space()); + tag().set_allocated_size(v.size()); + } else { + UninitializedCopy(v.begin(), v.end(), inlined_space()); + tag().set_inline_size(v.size()); + } +} + +template +InlinedVector::InlinedVector(const InlinedVector& v, + const allocator_type& alloc) + : allocator_and_tag_(alloc) { + reserve(v.size()); + if (allocated()) { + UninitializedCopy(v.begin(), v.end(), allocated_space()); + tag().set_allocated_size(v.size()); + } else { + UninitializedCopy(v.begin(), v.end(), inlined_space()); + tag().set_inline_size(v.size()); + } +} + +template +InlinedVector::InlinedVector(InlinedVector&& v) noexcept( + absl::allocator_is_nothrow::value || + std::is_nothrow_move_constructible::value) + : allocator_and_tag_(v.allocator_and_tag_) { + if (v.allocated()) { + // We can just steal the underlying buffer from the source. + // That leaves the source empty, so we clear its size. + init_allocation(v.allocation()); + v.tag() = Tag(); + } else { + UninitializedCopy(std::make_move_iterator(v.inlined_space()), + std::make_move_iterator(v.inlined_space() + v.size()), + inlined_space()); + } +} + +template +InlinedVector::InlinedVector( + InlinedVector&& v, + const allocator_type& + alloc) noexcept(absl::allocator_is_nothrow::value) + : allocator_and_tag_(alloc) { + if (v.allocated()) { + if (alloc == v.allocator()) { + // We can just steal the allocation from the source. 
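+      // (Added note, not in the original patch: stealing the buffer is only
+      // safe when the two allocators compare equal, because this vector's
+      // allocator must later be able to deallocate memory that was obtained
+      // from v's allocator.)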
+ tag() = v.tag(); + init_allocation(v.allocation()); + v.tag() = Tag(); + } else { + // We need to use our own allocator + reserve(v.size()); + UninitializedCopy(std::make_move_iterator(v.begin()), + std::make_move_iterator(v.end()), allocated_space()); + tag().set_allocated_size(v.size()); + } + } else { + UninitializedCopy(std::make_move_iterator(v.inlined_space()), + std::make_move_iterator(v.inlined_space() + v.size()), + inlined_space()); + tag().set_inline_size(v.size()); + } +} + +template +void InlinedVector::InitAssign(size_type n, const value_type& t) { + if (n > static_cast(N)) { + Allocation new_allocation(allocator(), n); + init_allocation(new_allocation); + UninitializedFill(allocated_space(), allocated_space() + n, t); + tag().set_allocated_size(n); + } else { + UninitializedFill(inlined_space(), inlined_space() + n, t); + tag().set_inline_size(n); + } +} + +template +void InlinedVector::InitAssign(size_type n) { + if (n > static_cast(N)) { + Allocation new_allocation(allocator(), n); + init_allocation(new_allocation); + UninitializedFill(allocated_space(), allocated_space() + n); + tag().set_allocated_size(n); + } else { + UninitializedFill(inlined_space(), inlined_space() + n); + tag().set_inline_size(n); + } +} + +template +void InlinedVector::resize(size_type n) { + size_type s = size(); + if (n < s) { + erase(begin() + n, end()); + return; + } + reserve(n); + assert(capacity() >= n); + + // Fill new space with elements constructed in-place. + if (allocated()) { + UninitializedFill(allocated_space() + s, allocated_space() + n); + tag().set_allocated_size(n); + } else { + UninitializedFill(inlined_space() + s, inlined_space() + n); + tag().set_inline_size(n); + } +} + +template +void InlinedVector::resize(size_type n, const value_type& elem) { + size_type s = size(); + if (n < s) { + erase(begin() + n, end()); + return; + } + reserve(n); + assert(capacity() >= n); + + // Fill new space with copies of 'elem'. + if (allocated()) { + UninitializedFill(allocated_space() + s, allocated_space() + n, elem); + tag().set_allocated_size(n); + } else { + UninitializedFill(inlined_space() + s, inlined_space() + n, elem); + tag().set_inline_size(n); + } +} + +template +template +typename InlinedVector::iterator InlinedVector::emplace( + const_iterator position, Args&&... args) { + assert(position >= begin()); + assert(position <= end()); + if (position == end()) { + emplace_back(std::forward(args)...); + return end() - 1; + } + size_type s = size(); + size_type idx = std::distance(cbegin(), position); + if (s == capacity()) { + EnlargeBy(1); + } + assert(s < capacity()); + iterator pos = begin() + idx; // Set 'pos' to a post-enlarge iterator. 
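+
+  // (Added explanatory note, not in the original patch: the code below opens
+  // a one-element gap at 'pos' by move-constructing the last element into the
+  // first uninitialized slot, shifting [pos, end - 1) right by one, and then
+  // destroying and re-constructing *pos in place from 'args'.)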
+ + pointer space; + if (allocated()) { + tag().set_allocated_size(s + 1); + space = allocated_space(); + } else { + tag().set_inline_size(s + 1); + space = inlined_space(); + } + Construct(space + s, std::move(space[s - 1])); + std::move_backward(pos, space + s - 1, space + s); + Destroy(pos, pos + 1); + Construct(pos, std::forward(args)...); + + return pos; +} + +template +typename InlinedVector::iterator InlinedVector::erase( + const_iterator first, const_iterator last) { + assert(begin() <= first); + assert(first <= last); + assert(last <= end()); + + iterator range_start = const_cast(first); + iterator range_end = const_cast(last); + + size_type s = size(); + ptrdiff_t erase_gap = std::distance(range_start, range_end); + if (erase_gap > 0) { + pointer space; + if (allocated()) { + space = allocated_space(); + tag().set_allocated_size(s - erase_gap); + } else { + space = inlined_space(); + tag().set_inline_size(s - erase_gap); + } + std::move(range_end, space + s, range_start); + Destroy(space + s - erase_gap, space + s); + } + return range_start; +} + +template +void InlinedVector::swap(InlinedVector& other) { + using std::swap; // Augment ADL with std::swap. + if (&other == this) { + return; + } + if (allocated() && other.allocated()) { + // Both out of line, so just swap the tag, allocation, and allocator. + swap(tag(), other.tag()); + swap(allocation(), other.allocation()); + swap(allocator(), other.allocator()); + return; + } + if (!allocated() && !other.allocated()) { + // Both inlined: swap up to smaller size, then move remaining elements. + InlinedVector* a = this; + InlinedVector* b = &other; + if (size() < other.size()) { + swap(a, b); + } + + const size_type a_size = a->size(); + const size_type b_size = b->size(); + assert(a_size >= b_size); + // 'a' is larger. Swap the elements up to the smaller array size. + std::swap_ranges(a->inlined_space(), + a->inlined_space() + b_size, + b->inlined_space()); + + // Move the remaining elements: A[b_size,a_size) -> B[b_size,a_size) + b->UninitializedCopy(a->inlined_space() + b_size, + a->inlined_space() + a_size, + b->inlined_space() + b_size); + a->Destroy(a->inlined_space() + b_size, a->inlined_space() + a_size); + + swap(a->tag(), b->tag()); + swap(a->allocator(), b->allocator()); + assert(b->size() == a_size); + assert(a->size() == b_size); + return; + } + // One is out of line, one is inline. + // We first move the elements from the inlined vector into the + // inlined space in the other vector. We then put the other vector's + // pointer/capacity into the originally inlined vector and swap + // the tags. + InlinedVector* a = this; + InlinedVector* b = &other; + if (a->allocated()) { + swap(a, b); + } + assert(!a->allocated()); + assert(b->allocated()); + const size_type a_size = a->size(); + const size_type b_size = b->size(); + // In an optimized build, b_size would be unused. + (void)b_size; + + // Made Local copies of size(), don't need tag() accurate anymore + swap(a->tag(), b->tag()); + + // Copy b_allocation out before b's union gets clobbered by inline_space. 
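+  // (Added note, not in the original patch: allocation() and inlined_space()
+  // alias the same union storage, so moving a's elements into b's inline
+  // space below would clobber b's Allocation record unless it is saved into
+  // a local first.)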
+ Allocation b_allocation = b->allocation(); + + b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size, + b->inlined_space()); + a->Destroy(a->inlined_space(), a->inlined_space() + a_size); + + a->allocation() = b_allocation; + + if (a->allocator() != b->allocator()) { + swap(a->allocator(), b->allocator()); + } + + assert(b->size() == a_size); + assert(a->size() == b_size); +} + +template +void InlinedVector::EnlargeBy(size_type delta) { + const size_type s = size(); + assert(s <= capacity()); + + size_type target = std::max(static_cast(N), s + delta); + + // Compute new capacity by repeatedly doubling current capacity + // TODO(psrc): Check and avoid overflow? + size_type new_capacity = capacity(); + while (new_capacity < target) { + new_capacity <<= 1; + } + + Allocation new_allocation(allocator(), new_capacity); + + UninitializedCopy(std::make_move_iterator(data()), + std::make_move_iterator(data() + s), + new_allocation.buffer()); + + ResetAllocation(new_allocation, s); +} + +template +auto InlinedVector::ShiftRight(const_iterator position, size_type n) + -> std::pair { + iterator start_used = const_cast(position); + iterator start_raw = const_cast(position); + size_type s = size(); + size_type required_size = s + n; + + if (required_size > capacity()) { + // Compute new capacity by repeatedly doubling current capacity + size_type new_capacity = capacity(); + while (new_capacity < required_size) { + new_capacity <<= 1; + } + // Move everyone into the new allocation, leaving a gap of n for the + // requested shift. + Allocation new_allocation(allocator(), new_capacity); + size_type index = position - begin(); + UninitializedCopy(std::make_move_iterator(data()), + std::make_move_iterator(data() + index), + new_allocation.buffer()); + UninitializedCopy(std::make_move_iterator(data() + index), + std::make_move_iterator(data() + s), + new_allocation.buffer() + index + n); + ResetAllocation(new_allocation, s); + + // New allocation means our iterator is invalid, so we'll recalculate. + // Since the entire gap is in new space, there's no used space to reuse. + start_raw = begin() + index; + start_used = start_raw; + } else { + // If we had enough space, it's a two-part move. Elements going into + // previously-unoccupied space need an UninitializedCopy. Elements + // going into a previously-occupied space are just a move. + iterator pos = const_cast(position); + iterator raw_space = end(); + size_type slots_in_used_space = raw_space - pos; + size_type new_elements_in_used_space = std::min(n, slots_in_used_space); + size_type new_elements_in_raw_space = n - new_elements_in_used_space; + size_type old_elements_in_used_space = + slots_in_used_space - new_elements_in_used_space; + + UninitializedCopy(std::make_move_iterator(pos + old_elements_in_used_space), + std::make_move_iterator(raw_space), + raw_space + new_elements_in_raw_space); + std::move_backward(pos, pos + old_elements_in_used_space, raw_space); + + // If the gap is entirely in raw space, the used space starts where the raw + // space starts, leaving no elements in used space. If the gap is entirely + // in used space, the raw space starts at the end of the gap, leaving all + // elements accounted for within the used space. 
+ start_used = pos; + start_raw = pos + new_elements_in_used_space; + } + return std::make_pair(start_used, start_raw); +} + +template +void InlinedVector::Destroy(value_type* ptr, value_type* ptr_last) { + for (value_type* p = ptr; p != ptr_last; ++p) { + AllocatorTraits::destroy(allocator(), p); + } + + // Overwrite unused memory with 0xab so we can catch uninitialized usage. + // Cast to void* to tell the compiler that we don't care that we might be + // scribbling on a vtable pointer. +#ifndef NDEBUG + if (ptr != ptr_last) { + memset(reinterpret_cast(ptr), 0xab, + sizeof(*ptr) * (ptr_last - ptr)); + } +#endif +} + +template +template +void InlinedVector::AppendRange(Iter first, Iter last, + std::forward_iterator_tag) { + using Length = typename std::iterator_traits::difference_type; + Length length = std::distance(first, last); + reserve(size() + length); + if (allocated()) { + UninitializedCopy(first, last, allocated_space() + size()); + tag().set_allocated_size(size() + length); + } else { + UninitializedCopy(first, last, inlined_space() + size()); + tag().set_inline_size(size() + length); + } +} + +template +template +void InlinedVector::AssignRange(Iter first, Iter last, + std::input_iterator_tag) { + // Optimized to avoid reallocation. + // Prefer reassignment to copy construction for elements. + iterator out = begin(); + for ( ; first != last && out != end(); ++first, ++out) + *out = *first; + erase(out, end()); + std::copy(first, last, std::back_inserter(*this)); +} + +template +template +void InlinedVector::AssignRange(Iter first, Iter last, + std::forward_iterator_tag) { + using Length = typename std::iterator_traits::difference_type; + Length length = std::distance(first, last); + // Prefer reassignment to copy construction for elements. 
+ if (static_cast(length) <= size()) { + erase(std::copy(first, last, begin()), end()); + return; + } + reserve(length); + iterator out = begin(); + for (; out != end(); ++first, ++out) *out = *first; + if (allocated()) { + UninitializedCopy(first, last, out); + tag().set_allocated_size(length); + } else { + UninitializedCopy(first, last, out); + tag().set_inline_size(length); + } +} + +template +auto InlinedVector::InsertWithCount(const_iterator position, + size_type n, const value_type& v) + -> iterator { + assert(position >= begin() && position <= end()); + if (n == 0) return const_cast(position); + std::pair it_pair = ShiftRight(position, n); + std::fill(it_pair.first, it_pair.second, v); + UninitializedFill(it_pair.second, it_pair.first + n, v); + tag().add_size(n); + return it_pair.first; +} + +template +template +auto InlinedVector::InsertWithRange(const_iterator position, + InputIter first, InputIter last, + std::input_iterator_tag) + -> iterator { + assert(position >= begin() && position <= end()); + size_type index = position - cbegin(); + size_type i = index; + while (first != last) insert(begin() + i++, *first++); + return begin() + index; +} + +// Overload of InlinedVector::InsertWithRange() +template +template +auto InlinedVector::InsertWithRange(const_iterator position, + ForwardIter first, + ForwardIter last, + std::forward_iterator_tag) + -> iterator { + assert(position >= begin() && position <= end()); + if (first == last) { + return const_cast(position); + } + using Length = typename std::iterator_traits::difference_type; + Length n = std::distance(first, last); + std::pair it_pair = ShiftRight(position, n); + size_type used_spots = it_pair.second - it_pair.first; + ForwardIter open_spot = std::next(first, used_spots); + std::copy(first, open_spot, it_pair.first); + UninitializedCopy(open_spot, last, it_pair.second); + tag().add_size(n); + return it_pair.first; +} + +} // namespace absl + +#endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc new file mode 100644 index 000000000..c559a9a1f --- /dev/null +++ b/absl/container/inlined_vector_test.cc @@ -0,0 +1,1593 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/inlined_vector.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/exception_testing.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/memory/memory.h" +#include "absl/strings/str_cat.h" + +namespace { + +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::CopyableOnlyInstance; +using absl::test_internal::InstanceTracker; +using testing::AllOf; +using testing::Each; +using testing::ElementsAre; +using testing::ElementsAreArray; +using testing::Eq; +using testing::Gt; +using testing::PrintToString; + +using IntVec = absl::InlinedVector; + +MATCHER_P(SizeIs, n, "") { + return testing::ExplainMatchResult(n, arg.size(), result_listener); +} + +MATCHER_P(CapacityIs, n, "") { + return testing::ExplainMatchResult(n, arg.capacity(), result_listener); +} + +MATCHER_P(ValueIs, e, "") { + return testing::ExplainMatchResult(e, arg.value(), result_listener); +} + +// TODO(bsamwel): Add support for movable-only types. + +// Test fixture for typed tests on BaseCountedInstance derived classes, see +// test_instance_tracker.h. +template +class InstanceTest : public ::testing::Test {}; +TYPED_TEST_CASE_P(InstanceTest); + +// A simple reference counted class to make sure that the proper elements are +// destroyed in the erase(begin, end) test. +class RefCounted { + public: + RefCounted(int value, int* count) : value_(value), count_(count) { + Ref(); + } + + RefCounted(const RefCounted& v) + : value_(v.value_), count_(v.count_) { + Ref(); + } + + ~RefCounted() { + Unref(); + count_ = nullptr; + } + + friend void swap(RefCounted& a, RefCounted& b) { + using std::swap; + swap(a.value_, b.value_); + swap(a.count_, b.count_); + } + + RefCounted& operator=(RefCounted v) { + using std::swap; + swap(*this, v); + return *this; + } + + void Ref() const { + ABSL_RAW_CHECK(count_ != nullptr, ""); + ++(*count_); + } + + void Unref() const { + --(*count_); + ABSL_RAW_CHECK(*count_ >= 0, ""); + } + + int value_; + int* count_; +}; + +using RefCountedVec = absl::InlinedVector; + +// A class with a vtable pointer +class Dynamic { + public: + virtual ~Dynamic() {} +}; + +using DynamicVec = absl::InlinedVector; + +// Append 0..len-1 to *v +template +static void Fill(Container* v, int len, int offset = 0) { + for (int i = 0; i < len; i++) { + v->push_back(i + offset); + } +} + +static IntVec Fill(int len, int offset = 0) { + IntVec v; + Fill(&v, len, offset); + return v; +} + +// This is a stateful allocator, but the state lives outside of the +// allocator (in whatever test is using the allocator). This is odd +// but helps in tests where the allocator is propagated into nested +// containers - that chain of allocators uses the same state and is +// thus easier to query for aggregate allocation information. 
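+//
+// A usage sketch (added for illustration, not part of the original patch;
+// it assumes the CountingAllocator<T> template defined just below):
+//
+//   int64_t allocated = 0;
+//   CountingAllocator<int> alloc(&allocated);
+//   absl::InlinedVector<int, 4, CountingAllocator<int>> v(alloc);
+//   v.resize(100);                           // forces a heap allocation
+//   assert(allocated >= 100 * sizeof(int));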
+template +class CountingAllocator : public std::allocator { + public: + using Alloc = std::allocator; + using pointer = typename Alloc::pointer; + using size_type = typename Alloc::size_type; + + CountingAllocator() : bytes_used_(nullptr) {} + explicit CountingAllocator(int64_t* b) : bytes_used_(b) {} + + template + CountingAllocator(const CountingAllocator& x) + : Alloc(x), bytes_used_(x.bytes_used_) {} + + pointer allocate(size_type n, + std::allocator::const_pointer hint = nullptr) { + assert(bytes_used_ != nullptr); + *bytes_used_ += n * sizeof(T); + return Alloc::allocate(n, hint); + } + + void deallocate(pointer p, size_type n) { + Alloc::deallocate(p, n); + assert(bytes_used_ != nullptr); + *bytes_used_ -= n * sizeof(T); + } + + template + class rebind { + public: + using other = CountingAllocator; + }; + + friend bool operator==(const CountingAllocator& a, + const CountingAllocator& b) { + return a.bytes_used_ == b.bytes_used_; + } + + friend bool operator!=(const CountingAllocator& a, + const CountingAllocator& b) { + return !(a == b); + } + + int64_t* bytes_used_; +}; + +TEST(IntVec, SimpleOps) { + for (int len = 0; len < 20; len++) { + IntVec v; + const IntVec& cv = v; // const alias + + Fill(&v, len); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + + for (int i = 0; i < len; i++) { + EXPECT_EQ(i, v[i]); + EXPECT_EQ(i, v.at(i)); + } + EXPECT_EQ(v.begin(), v.data()); + EXPECT_EQ(cv.begin(), cv.data()); + + int counter = 0; + for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + counter = 0; + for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + counter = 0; + for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) { + EXPECT_EQ(counter, *iter); + counter++; + } + EXPECT_EQ(counter, len); + + if (len > 0) { + EXPECT_EQ(0, v.front()); + EXPECT_EQ(len - 1, v.back()); + v.pop_back(); + EXPECT_EQ(len - 1, v.size()); + for (int i = 0; i < v.size(); ++i) { + EXPECT_EQ(i, v[i]); + EXPECT_EQ(i, v.at(i)); + } + } + } +} + +TEST(IntVec, AtThrows) { + IntVec v = {1, 2, 3}; + EXPECT_EQ(v.at(2), 3); + ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range, + "failed bounds check"); +} + +TEST(IntVec, ReverseIterator) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + int counter = len; + for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = len; + for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend(); + ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = len; + for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend(); + ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + } +} + +TEST(IntVec, Erase) { + for (int len = 1; len < 20; len++) { + for (int i = 0; i < len; ++i) { + IntVec v; + Fill(&v, len); + v.erase(v.begin() + i); + EXPECT_EQ(len - 1, v.size()); + for (int j = 0; j < i; ++j) { + EXPECT_EQ(j, v[j]); + } + for (int j = i; j < len - 1; ++j) { + EXPECT_EQ(j + 1, v[j]); + } + } + } +} + +// At the end of this test loop, the elements between [erase_begin, erase_end) +// should have reference counts == 0, and all others elements should have +// reference counts == 1. 
+TEST(RefCountedVec, EraseBeginEnd) { + for (int len = 1; len < 20; ++len) { + for (int erase_begin = 0; erase_begin < len; ++erase_begin) { + for (int erase_end = erase_begin; erase_end <= len; ++erase_end) { + std::vector counts(len, 0); + RefCountedVec v; + for (int i = 0; i < len; ++i) { + v.push_back(RefCounted(i, &counts[i])); + } + + int erase_len = erase_end - erase_begin; + + v.erase(v.begin() + erase_begin, v.begin() + erase_end); + + EXPECT_EQ(len - erase_len, v.size()); + + // Check the elements before the first element erased. + for (int i = 0; i < erase_begin; ++i) { + EXPECT_EQ(i, v[i].value_); + } + + // Check the elements after the first element erased. + for (int i = erase_begin; i < v.size(); ++i) { + EXPECT_EQ(i + erase_len, v[i].value_); + } + + // Check that the elements at the beginning are preserved. + for (int i = 0; i < erase_begin; ++i) { + EXPECT_EQ(1, counts[i]); + } + + // Check that the erased elements are destroyed + for (int i = erase_begin; i < erase_end; ++i) { + EXPECT_EQ(0, counts[i]); + } + + // Check that the elements at the end are preserved. + for (int i = erase_end; i< len; ++i) { + EXPECT_EQ(1, counts[i]); + } + } + } + } +} + +struct NoDefaultCtor { + explicit NoDefaultCtor(int) {} +}; +struct NoCopy { + NoCopy() {} + NoCopy(const NoCopy&) = delete; +}; +struct NoAssign { + NoAssign() {} + NoAssign& operator=(const NoAssign&) = delete; +}; +struct MoveOnly { + MoveOnly() {} + MoveOnly(MoveOnly&&) = default; + MoveOnly& operator=(MoveOnly&&) = default; +}; +TEST(InlinedVectorTest, NoDefaultCtor) { + absl::InlinedVector v(10, NoDefaultCtor(2)); + (void)v; +} +TEST(InlinedVectorTest, NoCopy) { + absl::InlinedVector v(10); + (void)v; +} +TEST(InlinedVectorTest, NoAssign) { + absl::InlinedVector v(10); + (void)v; +} +TEST(InlinedVectorTest, MoveOnly) { + absl::InlinedVector v; + v.push_back(MoveOnly{}); + v.push_back(MoveOnly{}); + v.push_back(MoveOnly{}); + v.erase(v.begin()); + v.push_back(MoveOnly{}); + v.erase(v.begin(), v.begin() + 1); + v.insert(v.begin(), MoveOnly{}); + v.emplace(v.begin()); + v.emplace(v.begin(), MoveOnly{}); +} +TEST(InlinedVectorTest, Noexcept) { + EXPECT_TRUE(std::is_nothrow_move_constructible::value); + EXPECT_TRUE((std::is_nothrow_move_constructible< + absl::InlinedVector>::value)); + + struct MoveCanThrow { + MoveCanThrow(MoveCanThrow&&) {} + }; + EXPECT_EQ(absl::default_allocator_is_nothrow::value, + (std::is_nothrow_move_constructible< + absl::InlinedVector>::value)); +} + + +TEST(IntVec, Insert) { + for (int len = 0; len < 20; len++) { + for (int pos = 0; pos <= len; pos++) { + { + // Single element + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, 9999); + IntVec::iterator it = v.insert(v.cbegin() + pos, 9999); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // n elements + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + IntVec::size_type n = 5; + std_v.insert(std_v.begin() + pos, n, 9999); + IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (random access iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::vector input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + 
EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (forward iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::forward_list input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (input iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888, 7777}); + std::istringstream input("9999 8888 7777"); + IntVec::iterator it = + v.insert(v.cbegin() + pos, std::istream_iterator(input), + std::istream_iterator()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Initializer list + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888}); + IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888}); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + } + } +} + +TEST(RefCountedVec, InsertConstructorDestructor) { + // Make sure the proper construction/destruction happen during insert + // operations. + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int pos = 0; pos <= len; pos++) { + SCOPED_TRACE(pos); + std::vector counts(len, 0); + int inserted_count = 0; + RefCountedVec v; + for (int i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(RefCounted(i, &counts[i])); + } + + EXPECT_THAT(counts, Each(Eq(1))); + + RefCounted insert_element(9999, &inserted_count); + EXPECT_EQ(1, inserted_count); + v.insert(v.begin() + pos, insert_element); + EXPECT_EQ(2, inserted_count); + // Check that the elements at the end are preserved. 
+ EXPECT_THAT(counts, Each(Eq(1))); + EXPECT_EQ(2, inserted_count); + } + } +} + +TEST(IntVec, Resize) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + // Try resizing up and down by k elements + static const int kResizeElem = 1000000; + for (int k = 0; k < 10; k++) { + // Enlarging resize + v.resize(len+k, kResizeElem); + EXPECT_EQ(len+k, v.size()); + EXPECT_LE(len+k, v.capacity()); + for (int i = 0; i < len+k; i++) { + if (i < len) { + EXPECT_EQ(i, v[i]); + } else { + EXPECT_EQ(kResizeElem, v[i]); + } + } + + // Shrinking resize + v.resize(len, kResizeElem); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (int i = 0; i < len; i++) { + EXPECT_EQ(i, v[i]); + } + } + } +} + +TEST(IntVec, InitWithLength) { + for (int len = 0; len < 20; len++) { + IntVec v(len, 7); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (int i = 0; i < len; i++) { + EXPECT_EQ(7, v[i]); + } + } +} + +TEST(IntVec, CopyConstructorAndAssignment) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + + IntVec v2(v); + EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2); + + for (int start_len = 0; start_len < 20; start_len++) { + IntVec v3; + Fill(&v3, start_len, 99); // Add dummy elements that should go away + v3 = v; + EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3); + } + } +} + +TEST(IntVec, MoveConstructorAndAssignment) { + for (int len = 0; len < 20; len++) { + IntVec v_in; + const int inlined_capacity = v_in.capacity(); + Fill(&v_in, len); + EXPECT_EQ(len, v_in.size()); + EXPECT_LE(len, v_in.capacity()); + + { + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + IntVec v_out(std::move(v_temp)); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. + EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + for (int start_len = 0; start_len < 20; start_len++) { + IntVec v_out; + Fill(&v_out, start_len, 99); // Add dummy elements that should go away + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + v_out = std::move(v_temp); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. + EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + } +} + +TEST(OverheadTest, Storage) { + // Check for size overhead. + // In particular, ensure that std::allocator doesn't cost anything to store. + // The union should be absorbing some of the allocation bookkeeping overhead + // in the larger vectors, leaving only the size_ field as overhead. 
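+  // (Added explanatory note, not part of the original patch: assuming the
+  // element type in these expectations is int*, as the word-based arithmetic
+  // suggests, the representation is the size/allocated tag word plus
+  // max(inline array, Allocation record). An Allocation holds a capacity and
+  // a pointer (two words), so once the inline array reaches two words the
+  // total overhead settles at the single tag word.)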
+ EXPECT_EQ(2 * sizeof(int*), + sizeof(absl::InlinedVector) - 1 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 2 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 3 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 4 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 5 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 6 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 7 * sizeof(int*)); + EXPECT_EQ(1 * sizeof(int*), + sizeof(absl::InlinedVector) - 8 * sizeof(int*)); +} + +TEST(IntVec, Clear) { + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + IntVec v; + Fill(&v, len); + v.clear(); + EXPECT_EQ(0, v.size()); + EXPECT_EQ(v.begin(), v.end()); + } +} + +TEST(IntVec, Reserve) { + for (int len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + for (int newlen = 0; newlen < 100; newlen++) { + const int* start_rep = v.data(); + v.reserve(newlen); + const int* final_rep = v.data(); + if (newlen <= len) { + EXPECT_EQ(start_rep, final_rep); + } + EXPECT_LE(newlen, v.capacity()); + + // Filling up to newlen should not change rep + while (v.size() < newlen) { + v.push_back(0); + } + EXPECT_EQ(final_rep, v.data()); + } + } +} + +TEST(StringVec, SelfRefPushBack) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long std::string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_THAT(v, ElementsAreArray(std_v)); + + v.push_back(v.back()); + std_v.push_back(std_v.back()); + } + EXPECT_THAT(v, ElementsAreArray(std_v)); +} + +TEST(StringVec, SelfRefPushBackWithMove) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long std::string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_EQ(v.back(), std_v.back()); + + v.push_back(std::move(v.back())); + std_v.push_back(std::move(std_v.back())); + } + EXPECT_EQ(v.back(), std_v.back()); +} + +TEST(StringVec, SelfMove) { + const std::string s = "A quite long std::string to ensure heap."; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + absl::InlinedVector v; + for (int i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(s); + } + // Indirection necessary to avoid compiler warning. + v = std::move(*(&v)); + // Ensure that the inlined vector is still in a valid state by copying it. + // We don't expect specific contents since a self-move results in an + // unspecified valid state. 
+    std::vector<std::string> copy(v.begin(), v.end());
+  }
+}
+
+TEST(IntVec, Swap) {
+  for (int l1 = 0; l1 < 20; l1++) {
+    SCOPED_TRACE(l1);
+    for (int l2 = 0; l2 < 20; l2++) {
+      SCOPED_TRACE(l2);
+      IntVec a = Fill(l1, 0);
+      IntVec b = Fill(l2, 100);
+      {
+        using std::swap;
+        swap(a, b);
+      }
+      EXPECT_EQ(l1, b.size());
+      EXPECT_EQ(l2, a.size());
+      for (int i = 0; i < l1; i++) {
+        SCOPED_TRACE(i);
+        EXPECT_EQ(i, b[i]);
+      }
+      for (int i = 0; i < l2; i++) {
+        SCOPED_TRACE(i);
+        EXPECT_EQ(100 + i, a[i]);
+      }
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, Swap) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  for (int l1 = 0; l1 < 20; l1++) {
+    SCOPED_TRACE(l1);
+    for (int l2 = 0; l2 < 20; l2++) {
+      SCOPED_TRACE(l2);
+      InstanceTracker tracker;
+      InstanceVec a, b;
+      const size_t inlined_capacity = a.capacity();
+      for (int i = 0; i < l1; i++) a.push_back(Instance(i));
+      for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
+      EXPECT_EQ(tracker.instances(), l1 + l2);
+      tracker.ResetCopiesMovesSwaps();
+      {
+        using std::swap;
+        swap(a, b);
+      }
+      EXPECT_EQ(tracker.instances(), l1 + l2);
+      if (a.size() > inlined_capacity && b.size() > inlined_capacity) {
+        EXPECT_EQ(tracker.swaps(), 0);  // Allocations are swapped.
+        EXPECT_EQ(tracker.moves(), 0);
+      } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
+        EXPECT_EQ(tracker.swaps(), std::min(l1, l2));
+        // TODO(bsamwel): This should use moves when the type is movable.
+        EXPECT_EQ(tracker.copies(), std::max(l1, l2) - std::min(l1, l2));
+      } else {
+        // One is allocated and the other isn't. The allocation is transferred
+        // without copying elements, and the inlined instances are copied/moved.
+        EXPECT_EQ(tracker.swaps(), 0);
+        // TODO(bsamwel): This should use moves when the type is movable.
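+        // [Editor's note: clarification, not part of the original commit.]
+        // The inline vector is necessarily the smaller of the two here, so
+        // exactly std::min(l1, l2) elements cross between representations;
+        // the heap buffer itself just changes owner without touching its
+        // elements.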
+        EXPECT_EQ(tracker.copies(), std::min(l1, l2));
+      }
+
+      EXPECT_EQ(l1, b.size());
+      EXPECT_EQ(l2, a.size());
+      for (int i = 0; i < l1; i++) {
+        EXPECT_EQ(i, b[i].value());
+      }
+      for (int i = 0; i < l2; i++) {
+        EXPECT_EQ(100 + i, a[i].value());
+      }
+    }
+  }
+}
+
+TEST(IntVec, EqualAndNotEqual) {
+  IntVec a, b;
+  EXPECT_TRUE(a == b);
+  EXPECT_FALSE(a != b);
+
+  a.push_back(3);
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+
+  b.push_back(3);
+  EXPECT_TRUE(a == b);
+  EXPECT_FALSE(a != b);
+
+  b.push_back(7);
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+
+  a.push_back(6);
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+
+  a.clear();
+  b.clear();
+  for (int i = 0; i < 100; i++) {
+    a.push_back(i);
+    b.push_back(i);
+    EXPECT_TRUE(a == b);
+    EXPECT_FALSE(a != b);
+
+    b[i] = b[i] + 1;
+    EXPECT_FALSE(a == b);
+    EXPECT_TRUE(a != b);
+
+    b[i] = b[i] - 1;  // Back to before
+    EXPECT_TRUE(a == b);
+    EXPECT_FALSE(a != b);
+  }
+}
+
+TEST(IntVec, RelationalOps) {
+  IntVec a, b;
+  EXPECT_FALSE(a < b);
+  EXPECT_FALSE(b < a);
+  EXPECT_FALSE(a > b);
+  EXPECT_FALSE(b > a);
+  EXPECT_TRUE(a <= b);
+  EXPECT_TRUE(b <= a);
+  EXPECT_TRUE(a >= b);
+  EXPECT_TRUE(b >= a);
+  b.push_back(3);
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(b < a);
+  EXPECT_FALSE(a > b);
+  EXPECT_TRUE(b > a);
+  EXPECT_TRUE(a <= b);
+  EXPECT_FALSE(b <= a);
+  EXPECT_FALSE(a >= b);
+  EXPECT_TRUE(b >= a);
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  InstanceTracker tracker;
+  for (int len = 0; len < 20; len++) {
+    SCOPED_TRACE(len);
+    tracker.ResetCopiesMovesSwaps();
+
+    InstanceVec v;
+    const size_t inlined_capacity = v.capacity();
+    for (int i = 0; i < len; i++) {
+      v.push_back(Instance(i));
+    }
+    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_GE(tracker.copies() + tracker.moves(),
+              len);  // More due to reallocation.
+    tracker.ResetCopiesMovesSwaps();
+
+    // Enlarging resize() must construct some objects
+    tracker.ResetCopiesMovesSwaps();
+    v.resize(len + 10, Instance(100));
+    EXPECT_EQ(tracker.instances(), len + 10);
+    if (len <= inlined_capacity && len + 10 > inlined_capacity) {
+      EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len);
+    } else {
+      // Only specify a minimum number of copies + moves. We don't want to
+      // depend on the reallocation policy here.
+      EXPECT_GE(tracker.copies() + tracker.moves(),
+                10);  // More due to reallocation.
+    }
+
+    // Shrinking resize() must destroy some objects
+    tracker.ResetCopiesMovesSwaps();
+    v.resize(len, Instance(100));
+    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_EQ(tracker.copies(), 0);
+    EXPECT_EQ(tracker.moves(), 0);
+
+    // reserve() must not increase the number of initialized objects
+    SCOPED_TRACE("reserve");
+    v.reserve(len+1000);
+    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+
+    // pop_back() and erase() must destroy one object
+    if (len > 0) {
+      tracker.ResetCopiesMovesSwaps();
+      v.pop_back();
+      EXPECT_EQ(tracker.instances(), len - 1);
+      EXPECT_EQ(tracker.copies(), 0);
+      EXPECT_EQ(tracker.moves(), 0);
+
+      if (!v.empty()) {
+        tracker.ResetCopiesMovesSwaps();
+        v.erase(v.begin());
+        EXPECT_EQ(tracker.instances(), len - 2);
+        EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2);
+      }
+    }
+
+    tracker.ResetCopiesMovesSwaps();
+    int instances_before_empty_erase = tracker.instances();
+    v.erase(v.begin(), v.begin());
+    EXPECT_EQ(tracker.instances(), instances_before_empty_erase);
+    EXPECT_EQ(tracker.copies() + tracker.moves(), 0);
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  InstanceTracker tracker;
+  for (int len = 0; len < 20; len++) {
+    SCOPED_TRACE(len);
+    tracker.ResetCopiesMovesSwaps();
+
+    InstanceVec v;
+    for (int i = 0; i < len; i++) {
+      v.push_back(Instance(i));
+    }
+    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_GE(tracker.copies() + tracker.moves(),
+              len);  // More due to reallocation.
+    tracker.ResetCopiesMovesSwaps();
+    {  // Copy constructor should create 'len' more instances.
+      InstanceVec v_copy(v);
+      EXPECT_EQ(tracker.instances(), len + len);
+      EXPECT_EQ(tracker.copies(), len);
+      EXPECT_EQ(tracker.moves(), 0);
+    }
+    EXPECT_EQ(tracker.instances(), len);
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  InstanceTracker tracker;
+  for (int len = 0; len < 20; len++) {
+    SCOPED_TRACE(len);
+    tracker.ResetCopiesMovesSwaps();
+
+    InstanceVec v;
+    const size_t inlined_capacity = v.capacity();
+    for (int i = 0; i < len; i++) {
+      v.push_back(Instance(i));
+    }
+    EXPECT_EQ(tracker.instances(), len);
+    EXPECT_GE(tracker.copies() + tracker.moves(),
+              len);  // More due to reallocation.
+    tracker.ResetCopiesMovesSwaps();
+    {
+      InstanceVec v_copy(std::move(v));
+      if (len > inlined_capacity) {
+        // Allocation is moved as a whole.
+        EXPECT_EQ(tracker.instances(), len);
+        EXPECT_EQ(tracker.live_instances(), len);
+        // Tests an implementation detail, don't rely on this in your code.
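+        // [Editor's note: not part of the original commit.] The standard
+        // only promises a valid but unspecified moved-from state; this
+        // implementation empties the source when it steals the heap
+        // allocation, e.g. after
+        //   InstanceVec stolen(std::move(v));  // v was heap-backed
+        // v.size() is observed to be 0, which is what the check below pins
+        // down.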
+        EXPECT_EQ(v.size(), 0);  // NOLINT misc-use-after-move
+        EXPECT_EQ(tracker.copies(), 0);
+        EXPECT_EQ(tracker.moves(), 0);
+      } else {
+        EXPECT_EQ(tracker.instances(), len + len);
+        if (Instance::supports_move()) {
+          EXPECT_EQ(tracker.live_instances(), len);
+          EXPECT_EQ(tracker.copies(), 0);
+          EXPECT_EQ(tracker.moves(), len);
+        } else {
+          EXPECT_EQ(tracker.live_instances(), len + len);
+          EXPECT_EQ(tracker.copies(), len);
+          EXPECT_EQ(tracker.moves(), 0);
+        }
+      }
+      EXPECT_EQ(tracker.swaps(), 0);
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  InstanceTracker tracker;
+  for (int len = 0; len < 20; len++) {
+    SCOPED_TRACE(len);
+    for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+      SCOPED_TRACE(longorshort);
+      tracker.ResetCopiesMovesSwaps();
+
+      InstanceVec longer, shorter;
+      for (int i = 0; i < len; i++) {
+        longer.push_back(Instance(i));
+        shorter.push_back(Instance(i));
+      }
+      longer.push_back(Instance(len));
+      EXPECT_EQ(tracker.instances(), len + len + 1);
+      EXPECT_GE(tracker.copies() + tracker.moves(),
+                len + len + 1);  // More due to reallocation.
+
+      tracker.ResetCopiesMovesSwaps();
+      if (longorshort) {
+        shorter = longer;
+        EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1));
+        EXPECT_GE(tracker.copies() + tracker.moves(),
+                  len + 1);  // More due to reallocation.
+      } else {
+        longer = shorter;
+        EXPECT_EQ(tracker.instances(), len + len);
+        EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+      }
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) {
+  using Instance = TypeParam;
+  using InstanceVec = absl::InlinedVector<Instance, 8>;
+  InstanceTracker tracker;
+  for (int len = 0; len < 20; len++) {
+    SCOPED_TRACE(len);
+    for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+      SCOPED_TRACE(longorshort);
+      tracker.ResetCopiesMovesSwaps();
+
+      InstanceVec longer, shorter;
+      const int inlined_capacity = longer.capacity();
+      for (int i = 0; i < len; i++) {
+        longer.push_back(Instance(i));
+        shorter.push_back(Instance(i));
+      }
+      longer.push_back(Instance(len));
+      EXPECT_EQ(tracker.instances(), len + len + 1);
+      EXPECT_GE(tracker.copies() + tracker.moves(),
+                len + len + 1);  // More due to reallocation.
+
+      tracker.ResetCopiesMovesSwaps();
+      int src_len;
+      if (longorshort) {
+        src_len = len + 1;
+        shorter = std::move(longer);
+      } else {
+        src_len = len;
+        longer = std::move(shorter);
+      }
+      if (src_len > inlined_capacity) {
+        // Allocation moved as a whole.
+        EXPECT_EQ(tracker.instances(), src_len);
+        EXPECT_EQ(tracker.live_instances(), src_len);
+        EXPECT_EQ(tracker.copies(), 0);
+        EXPECT_EQ(tracker.moves(), 0);
+      } else {
+        // Elements are all copied.
+        EXPECT_EQ(tracker.instances(), src_len + src_len);
+        if (Instance::supports_move()) {
+          EXPECT_EQ(tracker.copies(), 0);
+          EXPECT_EQ(tracker.moves(), src_len);
+          EXPECT_EQ(tracker.live_instances(), src_len);
+        } else {
+          EXPECT_EQ(tracker.copies(), src_len);
+          EXPECT_EQ(tracker.moves(), 0);
+          EXPECT_EQ(tracker.live_instances(), src_len + src_len);
+        }
+      }
+      EXPECT_EQ(tracker.swaps(), 0);
+    }
+  }
+}
+
+TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
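+    // [Editor's note: not part of the original commit.] Below, assign(n, x)
+    // replaces those contents; it behaves like clear() followed by
+    // insert(end(), n, x), except that existing storage may be reused, which
+    // is why the capacity assertion can insist the vector stays inline.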
+    std::vector<int> original_contents(original_size, 12345);
+
+    absl::InlinedVector<int, 2> v(original_contents.begin(),
+                                  original_contents.end());
+    v.assign(2, 123);
+    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123)));
+    if (original_size <= 2) {
+      // If the original had inline backing, it should stay inline.
+      EXPECT_EQ(2, v.capacity());
+    }
+  }
+}
+
+TEST(CountElemAssign, SimpleTypeWithAllocation) {
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<int> original_contents(original_size, 12345);
+
+    absl::InlinedVector<int, 2> v(original_contents.begin(),
+                                  original_contents.end());
+    v.assign(3, 123);
+    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123)));
+    EXPECT_LE(v.size(), v.capacity());
+  }
+}
+
+TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) {
+  using Instance = TypeParam;
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<Instance> original_contents(original_size, Instance(12345));
+
+    absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+                                       original_contents.end());
+    v.assign(2, Instance(123));
+    EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123))));
+    if (original_size <= 2) {
+      // If the original had inline backing, it should stay inline.
+      EXPECT_EQ(2, v.capacity());
+    }
+  }
+}
+
+template <typename Instance>
+void InstanceCountElemAssignWithAllocationTest() {
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<Instance> original_contents(original_size, Instance(12345));
+
+    absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+                                       original_contents.end());
+    v.assign(3, Instance(123));
+    EXPECT_THAT(v,
+                AllOf(SizeIs(3),
+                      ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
+    EXPECT_LE(v.size(), v.capacity());
+  }
+}
+TEST(CountElemAssign, WithAllocationCopyableInstance) {
+  InstanceCountElemAssignWithAllocationTest<CopyableOnlyInstance>();
+}
+TEST(CountElemAssign, WithAllocationCopyableMovableInstance) {
+  InstanceCountElemAssignWithAllocationTest<CopyableMovableInstance>();
+}
+
+TEST(RangedConstructor, SimpleType) {
+  std::vector<int> source_v = {4, 5, 6};
+  // First try to fit in inline backing
+  absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
+  EXPECT_EQ(3, v.size());
+  EXPECT_EQ(4, v.capacity());  // Indication that we're still on inlined storage
+  EXPECT_EQ(4, v[0]);
+  EXPECT_EQ(5, v[1]);
+  EXPECT_EQ(6, v[2]);
+
+  // Now, force a re-allocate
+  absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
+  EXPECT_EQ(3, realloc_v.size());
+  EXPECT_LT(2, realloc_v.capacity());
+  EXPECT_EQ(4, realloc_v[0]);
+  EXPECT_EQ(5, realloc_v[1]);
+  EXPECT_EQ(6, realloc_v[2]);
+}
+
+// Test for ranged constructors using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer, int inlined_capacity>
+void InstanceRangedConstructorTestForContainer() {
+  InstanceTracker tracker;
+  SourceContainer source_v = {Instance(0), Instance(1)};
+  tracker.ResetCopiesMovesSwaps();
+  absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
+                                                    source_v.end());
+  EXPECT_EQ(2, v.size());
+  EXPECT_LT(1, v.capacity());
+  EXPECT_EQ(0, v[0].value());
+  EXPECT_EQ(1, v[1].value());
+  EXPECT_EQ(tracker.copies(), 2);
+  EXPECT_EQ(tracker.moves(), 0);
+}
+
+template <typename Instance, int inlined_capacity>
+void InstanceRangedConstructorTestWithCapacity() {
+  // Test with const and non-const, random access and non-random-access sources.
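+  // [Editor's note: assumption, not part of the original commit.] The
+  // iterator category matters because a range constructor can compute
+  // std::distance(first, last) and reserve once for forward or random-access
+  // sources, but would have to grow incrementally (push_back(*it)) for
+  // single-pass input iterators.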
+  // TODO(bsamwel): Test with an input iterator source.
+  {
+    SCOPED_TRACE("std::list");
+    InstanceRangedConstructorTestForContainer<Instance, std::list<Instance>,
+                                              inlined_capacity>();
+    {
+      SCOPED_TRACE("const std::list");
+      InstanceRangedConstructorTestForContainer<
+          Instance, const std::list<Instance>, inlined_capacity>();
+    }
+    {
+      SCOPED_TRACE("std::vector");
+      InstanceRangedConstructorTestForContainer<
+          Instance, std::vector<Instance>, inlined_capacity>();
+    }
+    {
+      SCOPED_TRACE("const std::vector");
+      InstanceRangedConstructorTestForContainer<
+          Instance, const std::vector<Instance>, inlined_capacity>();
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, RangedConstructor) {
+  using Instance = TypeParam;
+  SCOPED_TRACE("capacity=1");
+  InstanceRangedConstructorTestWithCapacity<Instance, 1>();
+  SCOPED_TRACE("capacity=2");
+  InstanceRangedConstructorTestWithCapacity<Instance, 2>();
+}
+
+TEST(RangedConstructor, ElementsAreConstructed) {
+  std::vector<std::string> source_v = {"cat", "dog"};
+
+  // Force expansion and re-allocation of v. Ensures that when the vector is
+  // expanded that new elements are constructed.
+  absl::InlinedVector<std::string, 1> v(source_v.begin(), source_v.end());
+  EXPECT_EQ("cat", v[0]);
+  EXPECT_EQ("dog", v[1]);
+}
+
+TEST(RangedAssign, SimpleType) {
+  // Test for all combinations of original sizes (empty and non-empty inline,
+  // and out of line) and target sizes.
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<int> original_contents(original_size, 12345);
+
+    for (size_t target_size = 0; target_size <= 5; ++target_size) {
+      SCOPED_TRACE(target_size);
+
+      // New contents are [3, 4, ...]
+      std::vector<int> new_contents;
+      for (size_t i = 0; i < target_size; ++i) {
+        new_contents.push_back(i + 3);
+      }
+
+      absl::InlinedVector<int, 3> v(original_contents.begin(),
+                                    original_contents.end());
+      v.assign(new_contents.begin(), new_contents.end());
+
+      EXPECT_EQ(new_contents.size(), v.size());
+      EXPECT_LE(new_contents.size(), v.capacity());
+      if (target_size <= 3 && original_size <= 3) {
+        // Storage should stay inline when target size is small.
+        EXPECT_EQ(3, v.capacity());
+      }
+      EXPECT_THAT(v, ElementsAreArray(new_contents));
+    }
+  }
+}
+
+// Returns true if lhs and rhs have the same value.
+template <typename Instance>
+static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) {
+  return lhs.value() == rhs.value();
+}
+
+// Test for ranged assign() using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer>
+void InstanceRangedAssignTestForContainer() {
+  // Test for all combinations of original sizes (empty and non-empty inline,
+  // and out of line) and target sizes.
+  for (size_t original_size = 0; original_size <= 5; ++original_size) {
+    SCOPED_TRACE(original_size);
+    // Original contents are [12345, 12345, ...]
+    std::vector<Instance> original_contents(original_size, Instance(12345));
+
+    for (size_t target_size = 0; target_size <= 5; ++target_size) {
+      SCOPED_TRACE(target_size);
+
+      // New contents are [3, 4, ...]
+      // Generate data using a non-const container, because SourceContainer
+      // itself may be const.
+      // TODO(bsamwel): Test with an input iterator.
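+      // [Editor's note: illustrative sketch, not part of the original
+      // commit.] A const SourceContainer can still be read from, so the
+      // staging pattern is:
+      //   std::vector<Instance> staging = ...;  // mutable staging buffer
+      //   const SourceContainer src(staging.begin(), staging.end());
+      //   v.assign(src.begin(), src.end());     // const_iterators suffice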
+      std::vector<Instance> new_contents_in;
+      for (size_t i = 0; i < target_size; ++i) {
+        new_contents_in.push_back(Instance(i + 3));
+      }
+      SourceContainer new_contents(new_contents_in.begin(),
+                                   new_contents_in.end());
+
+      absl::InlinedVector<Instance, 3> v(original_contents.begin(),
+                                         original_contents.end());
+      v.assign(new_contents.begin(), new_contents.end());
+
+      EXPECT_EQ(new_contents.size(), v.size());
+      EXPECT_LE(new_contents.size(), v.capacity());
+      if (target_size <= 3 && original_size <= 3) {
+        // Storage should stay inline when target size is small.
+        EXPECT_EQ(3, v.capacity());
+      }
+      EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
+                             InstanceValuesEqual<Instance>));
+    }
+  }
+}
+
+TYPED_TEST_P(InstanceTest, RangedAssign) {
+  using Instance = TypeParam;
+  // Test with const and non-const, random access and non-random-access sources.
+  // TODO(bsamwel): Test with an input iterator source.
+  SCOPED_TRACE("std::list");
+  InstanceRangedAssignTestForContainer<Instance, std::list<Instance>>();
+  SCOPED_TRACE("const std::list");
+  InstanceRangedAssignTestForContainer<Instance, const std::list<Instance>>();
+  SCOPED_TRACE("std::vector");
+  InstanceRangedAssignTestForContainer<Instance, std::vector<Instance>>();
+  SCOPED_TRACE("const std::vector");
+  InstanceRangedAssignTestForContainer<Instance,
+                                       const std::vector<Instance>>();
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
+  EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
+              AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
+  EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
+              AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, DisparateTypesInList) {
+  EXPECT_THAT((absl::InlinedVector<int, 2>{-7, 8ULL}), ElementsAre(-7, 8));
+
+  EXPECT_THAT((absl::InlinedVector<std::string, 2>{"foo", std::string("bar")}),
+              ElementsAre("foo", "bar"));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
+  EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
+                  CopyableMovableInstance(0)}),
+              AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0))));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
+  EXPECT_THAT(
+      (absl::InlinedVector<CopyableMovableInstance, 1>{
+          CopyableMovableInstance(0), CopyableMovableInstance(1)}),
+      AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1))));
+}
+
+TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+
+    absl::InlinedVector<int, 2> v1(original_size, 12345);
+    const size_t original_capacity_v1 = v1.capacity();
+    v1.assign({3});
+    EXPECT_THAT(
+        v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3)));
+
+    absl::InlinedVector<int, 2> v2(original_size, 12345);
+    const size_t original_capacity_v2 = v2.capacity();
+    v2 = {3};
+    EXPECT_THAT(
+        v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3)));
+  }
+}
+
+TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) {
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+    absl::InlinedVector<int, 2> v1(original_size, 12345);
+    v1.assign({3, 4, 5});
+    EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3, v1.capacity());
+
+    absl::InlinedVector<int, 2> v2(original_size, 12345);
+    v2 = {3, 4, 5};
+    EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+    EXPECT_LE(3, v2.capacity());
+  }
+}
+
+TEST(InitializerListAssign, DisparateTypesInList) {
+  absl::InlinedVector<int, 2> v_int1;
+  v_int1.assign({-7, 8ULL});
+  EXPECT_THAT(v_int1, ElementsAre(-7, 8));
+
+  absl::InlinedVector<int, 2> v_int2;
+  v_int2 = {-7, 8ULL};
+  EXPECT_THAT(v_int2, ElementsAre(-7, 8));
+
+  absl::InlinedVector<std::string, 2> v_string1;
+  v_string1.assign({"foo", std::string("bar")});
+  EXPECT_THAT(v_string1, ElementsAre("foo", "bar"));
+
+  absl::InlinedVector<std::string, 2> v_string2;
+  v_string2 = {"foo", std::string("bar")};
+  EXPECT_THAT(v_string2, ElementsAre("foo", "bar"));
+}
+
+TYPED_TEST_P(InstanceTest, InitializerListAssign) {
+  using Instance = TypeParam;
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+    absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+    const size_t original_capacity = v.capacity();
+    v.assign({Instance(3)});
+    EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity),
+                         ElementsAre(ValueIs(3))));
+  }
+  for (size_t original_size = 0; original_size <= 4; ++original_size) {
+    SCOPED_TRACE(original_size);
+    absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+    v.assign({Instance(3), Instance(4), Instance(5)});
+    EXPECT_THAT(v, AllOf(SizeIs(3),
+                         ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+    EXPECT_LE(3, v.capacity());
+  }
+}
+
+REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
+                           CountConstructorsDestructorsOnCopyConstruction,
+                           CountConstructorsDestructorsOnMoveConstruction,
+                           CountConstructorsDestructorsOnAssignment,
+                           CountConstructorsDestructorsOnMoveAssignment,
+                           CountElemAssignInlineBacking, RangedConstructor,
+                           RangedAssign, InitializerListAssign);
+
+using InstanceTypes =
+    ::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
+INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+
+TEST(DynamicVec, DynamicVecCompiles) {
+  DynamicVec v;
+  (void)v;
+}
+
+TEST(AllocatorSupportTest, Constructors) {
+  using MyAlloc = CountingAllocator<int>;
+  using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  int64_t allocated = 0;
+  MyAlloc alloc(&allocated);
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); }
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); }
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); }
+
+  AllocVec v2;
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); }
+  { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); }
+}
+
+TEST(AllocatorSupportTest, CountAllocations) {
+  using MyAlloc = CountingAllocator<int>;
+  using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  int64_t allocated = 0;
+  MyAlloc alloc(&allocated);
+  {
+    AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
+    EXPECT_THAT(allocated, 0);
+  }
+  EXPECT_THAT(allocated, 0);
+  {
+    AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+    EXPECT_THAT(allocated, v.size() * sizeof(int));
+  }
+  EXPECT_THAT(allocated, 0);
+  {
+    AllocVec v(4, 1, alloc);
+    EXPECT_THAT(allocated, 0);
+
+    int64_t allocated2 = 0;
+    MyAlloc alloc2(&allocated2);
+    AllocVec v2(v, alloc2);
+    EXPECT_THAT(allocated2, 0);
+
+    int64_t allocated3 = 0;
+    MyAlloc alloc3(&allocated3);
+    AllocVec v3(std::move(v), alloc3);
+    EXPECT_THAT(allocated3, 0);
+  }
+  EXPECT_THAT(allocated, 0);
+  {
+    AllocVec v(8, 2, alloc);
+    EXPECT_THAT(allocated, v.size() * sizeof(int));
+
+    int64_t allocated2 = 0;
+    MyAlloc alloc2(&allocated2);
+    AllocVec v2(v, alloc2);
+    EXPECT_THAT(allocated2, v2.size() * sizeof(int));
+
+    int64_t allocated3 = 0;
+    MyAlloc alloc3(&allocated3);
+    AllocVec v3(std::move(v), alloc3);
+    EXPECT_THAT(allocated3, v3.size() * sizeof(int));
+  }
+}
+
+TEST(AllocatorSupportTest, SwapBothAllocated) {
+  using MyAlloc = CountingAllocator<int>;
+  using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  {
+    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+    const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+    MyAlloc a1(&allocated1);
+    MyAlloc a2(&allocated2);
+    AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+    AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+    EXPECT_LT(v1.capacity(), v2.capacity());
+    EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+    EXPECT_THAT(allocated2, v2.capacity() * sizeof(int));
+    v1.swap(v2);
+    EXPECT_THAT(v1, ElementsAreArray(ia2));
+    EXPECT_THAT(v2, ElementsAreArray(ia1));
+    EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+    EXPECT_THAT(allocated2, v1.capacity() * sizeof(int));
+  }
+  EXPECT_THAT(allocated1, 0);
+  EXPECT_THAT(allocated2, 0);
+}
+
+TEST(AllocatorSupportTest, SwapOneAllocated) {
+  using MyAlloc = CountingAllocator<int>;
+  using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+  int64_t allocated1 = 0;
+  int64_t allocated2 = 0;
+  {
+    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+    const int ia2[] = { 0, 1, 2, 3 };
+    MyAlloc a1(&allocated1);
+    MyAlloc a2(&allocated2);
+    AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+    AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+    EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+    EXPECT_THAT(allocated2, 0);
+    v1.swap(v2);
+    EXPECT_THAT(v1, ElementsAreArray(ia2));
+    EXPECT_THAT(v2, ElementsAreArray(ia1));
+    EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+    EXPECT_THAT(allocated2, 0);
+    EXPECT_TRUE(v2.get_allocator() == a1);
+    EXPECT_TRUE(v1.get_allocator() == a2);
+  }
+  EXPECT_THAT(allocated1, 0);
+  EXPECT_THAT(allocated2, 0);
+}
+
+TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
+  using StdVector = std::vector<int, CountingAllocator<int>>;
+  using MyAlloc =
+      std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
+  using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
+
+  int64_t allocated = 0;
+  AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
+  EXPECT_EQ(allocated, 0);
+
+  // This default constructs a vector, but the allocator should pass itself
+  // into the vector.
+  // The absl::InlinedVector does not allocate any memory.
+  // The vector does not allocate any memory.
+  vec.resize(1);
+  EXPECT_EQ(allocated, 0);
+
+  // We make vector allocate memory.
+  // It must go through the allocator even though we didn't construct the
+  // vector directly.
+  vec[0].push_back(1);
+  EXPECT_EQ(allocated, sizeof(int) * 1);
+
+  // Another allocating vector.
+  vec.push_back(vec[0]);
+  EXPECT_EQ(allocated, sizeof(int) * 2);
+
+  // Overflow the inlined memory.
+  // The absl::InlinedVector will now allocate.
+  vec.resize(5);
+  EXPECT_EQ(allocated, sizeof(int) * 2 + sizeof(StdVector) * 8);
+
+  // Adding one more in external mode should also work.
+  vec.push_back(vec[0]);
+  EXPECT_EQ(allocated, sizeof(int) * 3 + sizeof(StdVector) * 8);
+
+  // And extending these should still work.
+  vec[0].push_back(1);
+  EXPECT_EQ(allocated, sizeof(int) * 4 + sizeof(StdVector) * 8);
+
+  vec.clear();
+  EXPECT_EQ(allocated, 0);
+}
+
+}  // anonymous namespace
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
new file mode 100644
index 000000000..fe00aca8f
--- /dev/null
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+namespace absl {
+namespace test_internal {
+int BaseCountedInstance::num_instances_ = 0;
+int BaseCountedInstance::num_live_instances_ = 0;
+int BaseCountedInstance::num_moves_ = 0;
+int BaseCountedInstance::num_copies_ = 0;
+int BaseCountedInstance::num_swaps_ = 0;
+
+}  // namespace test_internal
+}  // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
new file mode 100644
index 000000000..cf8f3a531
--- /dev/null
+++ b/absl/container/internal/test_instance_tracker.h
@@ -0,0 +1,220 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+namespace absl {
+namespace test_internal {
+
+// A type that counts the number of occurrences of the type, the live
+// occurrences of the type, as well as the number of copies, moves, and swaps
+// that have occurred on the type. This is used as a base class for the
+// copyable, copyable+movable, and movable types below that are used in actual
+// tests. Use InstanceTracker in tests to track the number of instances.
+class BaseCountedInstance {
+ public:
+  explicit BaseCountedInstance(int x) : value_(x) {
+    ++num_instances_;
+    ++num_live_instances_;
+  }
+  BaseCountedInstance(const BaseCountedInstance& x)
+      : value_(x.value_), is_live_(x.is_live_) {
+    ++num_instances_;
+    if (is_live_) ++num_live_instances_;
+    ++num_copies_;
+  }
+  BaseCountedInstance(BaseCountedInstance&& x)
+      : value_(x.value_), is_live_(x.is_live_) {
+    x.is_live_ = false;
+    ++num_instances_;
+    ++num_moves_;
+  }
+  ~BaseCountedInstance() {
+    --num_instances_;
+    if (is_live_) --num_live_instances_;
+  }
+
+  BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+    value_ = x.value_;
+    if (is_live_) --num_live_instances_;
+    is_live_ = x.is_live_;
+    if (is_live_) ++num_live_instances_;
+    ++num_copies_;
+    return *this;
+  }
+  BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+    value_ = x.value_;
+    if (is_live_) --num_live_instances_;
+    is_live_ = x.is_live_;
+    x.is_live_ = false;
+    ++num_moves_;
+    return *this;
+  }
+
+  int value() const {
+    if (!is_live_) std::abort();
+    return value_;
+  }
+
+  friend std::ostream& operator<<(std::ostream& o,
+                                  const BaseCountedInstance& v) {
+    return o << "[value:" << v.value() << "]";
+  }
+
+  // Implementation of efficient swap() that counts swaps.
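+  // [Editor's note: not part of the original commit.] The derived types
+  // below expose this through a friend swap() found by argument-dependent
+  // lookup, so the idiomatic call sites in the tests are:
+  //   using std::swap;
+  //   swap(a, b);  // selects the counted overload rather than std::swap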
+ static void SwapImpl( + BaseCountedInstance& lhs, // NOLINT(runtime/references) + BaseCountedInstance& rhs) { // NOLINT(runtime/references) + using std::swap; + swap(lhs.value_, rhs.value_); + swap(lhs.is_live_, rhs.is_live_); + ++BaseCountedInstance::num_swaps_; + } + + private: + friend class InstanceTracker; + + int value_; + + // Indicates if the value is live, ie it hasn't been moved away from. + bool is_live_ = true; + + // Number of instances. + static int num_instances_; + + // Number of live instances (those that have not been moved away from.) + static int num_live_instances_; + + // Number of times that BaseCountedInstance objects were moved. + static int num_moves_; + + // Number of times that BaseCountedInstance objects were copied. + static int num_copies_; + + // Number of times that BaseCountedInstance objects were swapped. + static int num_swaps_; +}; + +// Helper to track the BaseCountedInstance instance counters. Expects that the +// number of instances and live_instances are the same when it is constructed +// and when it is destructed. +class InstanceTracker { + public: + InstanceTracker() + : start_instances_(BaseCountedInstance::num_instances_), + start_live_instances_(BaseCountedInstance::num_live_instances_) { + ResetCopiesMovesSwaps(); + } + ~InstanceTracker() { + if (instances() != 0) std::abort(); + if (live_instances() != 0) std::abort(); + } + + // Returns the number of BaseCountedInstance instances both containing valid + // values and those moved away from compared to when the InstanceTracker was + // constructed + int instances() const { + return BaseCountedInstance::num_instances_ - start_instances_; + } + + // Returns the number of live BaseCountedInstance instances compared to when + // the InstanceTracker was constructed + int live_instances() const { + return BaseCountedInstance::num_live_instances_ - start_live_instances_; + } + + // Returns the number of moves on BaseCountedInstance objects since + // construction or since the last call to ResetCopiesMovesSwaps(). + int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; } + + // Returns the number of copies on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int copies() const { + return BaseCountedInstance::num_copies_ - start_copies_; + } + + // Returns the number of swaps on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; } + + // Resets the base values for moves, copies and swaps to the current values, + // so that subsequent Get*() calls for moves, copies and swaps will compare to + // the situation at the point of this call. + void ResetCopiesMovesSwaps() { + start_moves_ = BaseCountedInstance::num_moves_; + start_copies_ = BaseCountedInstance::num_copies_; + start_swaps_ = BaseCountedInstance::num_swaps_; + } + + private: + int start_instances_; + int start_live_instances_; + int start_moves_; + int start_copies_; + int start_swaps_; +}; + +// Copyable, not movable. 
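+// [Editor's note: not part of the original commit.] Because no move
+// operations are declared, std::move() on a CopyableOnlyInstance silently
+// falls back to the copy constructor:
+//   CopyableOnlyInstance a(1);
+//   CopyableOnlyInstance b(std::move(a));  // counted as a copy, not a move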
+class CopyableOnlyInstance : public BaseCountedInstance { + public: + explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {} + CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default; + CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default; + + friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() { return false; } +}; + +// Copyable and movable. +class CopyableMovableInstance : public BaseCountedInstance { + public: + explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {} + CopyableMovableInstance(const CopyableMovableInstance& rhs) = default; + CopyableMovableInstance(CopyableMovableInstance&& rhs) = default; + CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) = + default; + CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default; + + friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() { return true; } +}; + +// Only movable, not default-constructible. +class MovableOnlyInstance : public BaseCountedInstance { + public: + explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {} + MovableOnlyInstance(MovableOnlyInstance&& other) = default; + MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default; + + friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() { return true; } +}; + +} // namespace test_internal +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ diff --git a/absl/container/internal/test_instance_tracker_test.cc b/absl/container/internal/test_instance_tracker_test.cc new file mode 100644 index 000000000..9efb6771c --- /dev/null +++ b/absl/container/internal/test_instance_tracker_test.cc @@ -0,0 +1,160 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/test_instance_tracker.h" + +#include "gtest/gtest.h" + +namespace { + +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::CopyableOnlyInstance; +using absl::test_internal::InstanceTracker; +using absl::test_internal::MovableOnlyInstance; + +TEST(TestInstanceTracker, CopyableMovable) { + InstanceTracker tracker; + CopyableMovableInstance src(1); + EXPECT_EQ(1, src.value()) << src; + CopyableMovableInstance copy(src); + CopyableMovableInstance move(std::move(src)); + EXPECT_EQ(1, tracker.copies()); + EXPECT_EQ(1, tracker.moves()); + EXPECT_EQ(0, tracker.swaps()); + EXPECT_EQ(3, tracker.instances()); + EXPECT_EQ(2, tracker.live_instances()); + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance copy_assign(1); + copy_assign = copy; + CopyableMovableInstance move_assign(1); + move_assign = std::move(move); + EXPECT_EQ(1, tracker.copies()); + EXPECT_EQ(1, tracker.moves()); + EXPECT_EQ(0, tracker.swaps()); + EXPECT_EQ(5, tracker.instances()); + EXPECT_EQ(3, tracker.live_instances()); + tracker.ResetCopiesMovesSwaps(); + + { + using std::swap; + swap(move_assign, copy); + swap(copy, move_assign); + EXPECT_EQ(2, tracker.swaps()); + EXPECT_EQ(0, tracker.copies()); + EXPECT_EQ(0, tracker.moves()); + EXPECT_EQ(5, tracker.instances()); + EXPECT_EQ(3, tracker.live_instances()); + } +} + +TEST(TestInstanceTracker, CopyableOnly) { + InstanceTracker tracker; + CopyableOnlyInstance src(1); + EXPECT_EQ(1, src.value()) << src; + CopyableOnlyInstance copy(src); + CopyableOnlyInstance copy2(std::move(src)); // NOLINT + EXPECT_EQ(2, tracker.copies()); + EXPECT_EQ(0, tracker.moves()); + EXPECT_EQ(3, tracker.instances()); + EXPECT_EQ(3, tracker.live_instances()); + tracker.ResetCopiesMovesSwaps(); + + CopyableOnlyInstance copy_assign(1); + copy_assign = copy; + CopyableOnlyInstance copy_assign2(1); + copy_assign2 = std::move(copy2); // NOLINT + EXPECT_EQ(2, tracker.copies()); + EXPECT_EQ(0, tracker.moves()); + EXPECT_EQ(5, tracker.instances()); + EXPECT_EQ(5, tracker.live_instances()); + tracker.ResetCopiesMovesSwaps(); + + { + using std::swap; + swap(src, copy); + swap(copy, src); + EXPECT_EQ(2, tracker.swaps()); + EXPECT_EQ(0, tracker.copies()); + EXPECT_EQ(0, tracker.moves()); + EXPECT_EQ(5, tracker.instances()); + EXPECT_EQ(5, tracker.live_instances()); + } +} + +TEST(TestInstanceTracker, MovableOnly) { + InstanceTracker tracker; + MovableOnlyInstance src(1); + EXPECT_EQ(1, src.value()) << src; + MovableOnlyInstance move(std::move(src)); + MovableOnlyInstance move_assign(2); + move_assign = std::move(move); + EXPECT_EQ(3, tracker.instances()); + EXPECT_EQ(1, tracker.live_instances()); + EXPECT_EQ(2, tracker.moves()); + EXPECT_EQ(0, tracker.copies()); + tracker.ResetCopiesMovesSwaps(); + + { + using std::swap; + MovableOnlyInstance other(2); + swap(move_assign, other); + swap(other, move_assign); + EXPECT_EQ(2, tracker.swaps()); + EXPECT_EQ(0, tracker.copies()); + EXPECT_EQ(0, tracker.moves()); + EXPECT_EQ(4, tracker.instances()); + EXPECT_EQ(2, tracker.live_instances()); + } +} + +TEST(TestInstanceTracker, ExistingInstances) { + CopyableMovableInstance uncounted_instance(1); + CopyableMovableInstance uncounted_live_instance( + std::move(uncounted_instance)); + InstanceTracker tracker; + EXPECT_EQ(0, tracker.instances()); + EXPECT_EQ(0, tracker.live_instances()); + EXPECT_EQ(0, tracker.copies()); + { + CopyableMovableInstance instance1(1); + EXPECT_EQ(1, tracker.instances()); + EXPECT_EQ(1, tracker.live_instances()); + EXPECT_EQ(0, 
tracker.copies());
+    EXPECT_EQ(0, tracker.moves());
+    {
+      InstanceTracker tracker2;
+      CopyableMovableInstance instance2(instance1);
+      CopyableMovableInstance instance3(std::move(instance2));
+      EXPECT_EQ(3, tracker.instances());
+      EXPECT_EQ(2, tracker.live_instances());
+      EXPECT_EQ(1, tracker.copies());
+      EXPECT_EQ(1, tracker.moves());
+      EXPECT_EQ(2, tracker2.instances());
+      EXPECT_EQ(1, tracker2.live_instances());
+      EXPECT_EQ(1, tracker2.copies());
+      EXPECT_EQ(1, tracker2.moves());
+    }
+    EXPECT_EQ(1, tracker.instances());
+    EXPECT_EQ(1, tracker.live_instances());
+    EXPECT_EQ(1, tracker.copies());
+    EXPECT_EQ(1, tracker.moves());
+  }
+  EXPECT_EQ(0, tracker.instances());
+  EXPECT_EQ(0, tracker.live_instances());
+  EXPECT_EQ(1, tracker.copies());
+  EXPECT_EQ(1, tracker.moves());
+}
+
+}  // namespace
diff --git a/absl/copts.bzl b/absl/copts.bzl
new file mode 100644
index 000000000..68aafd5c5
--- /dev/null
+++ b/absl/copts.bzl
@@ -0,0 +1,138 @@
+"""absl specific copts.
+
+Flags specified here must not impact ABI. Code compiled with and without these
+opts will be linked together, and in some cases headers compiled with and
+without these options will be part of the same program.
+"""
+GCC_FLAGS = [
+    "-Wall",
+    "-Wextra",
+    "-Wcast-qual",
+    "-Wconversion-null",
+    "-Wmissing-declarations",
+    "-Woverlength-strings",
+    "-Wpointer-arith",
+    "-Wunused-local-typedefs",
+    "-Wunused-result",
+    "-Wvarargs",
+    "-Wvla",  # variable-length array
+    "-Wwrite-strings",
+]
+
+GCC_TEST_FLAGS = [
+    "-Wno-conversion-null",
+    "-Wno-missing-declarations",
+    "-Wno-sign-compare",
+    "-Wno-unused-function",
+    "-Wno-unused-parameter",
+    "-Wno-unused-private-field",
+]
+
+LLVM_FLAGS = [
+    "-Wall",
+    "-Wextra",
+    "-Weverything",
+    "-Wno-c++98-compat-pedantic",
+    "-Wno-comma",
+    "-Wno-conversion",
+    "-Wno-disabled-macro-expansion",
+    "-Wno-documentation",
+    "-Wno-documentation-unknown-command",
+    "-Wno-double-promotion",
+    "-Wno-exit-time-destructors",
+    "-Wno-extra-semi",
+    "-Wno-float-conversion",
+    "-Wno-float-equal",
+    "-Wno-format-nonliteral",
+    "-Wno-gcc-compat",
+    "-Wno-global-constructors",
+    "-Wno-google3-inheriting-constructor",
+    "-Wno-google3-lambda-expression",
+    "-Wno-google3-rvalue-reference",
+    "-Wno-google3-trailing-return-type",
+    "-Wno-nested-anon-types",
+    "-Wno-non-modular-include-in-module",
+    "-Wno-old-style-cast",
+    "-Wno-packed",
+    "-Wno-padded",
+    "-Wno-range-loop-analysis",
+    "-Wno-reserved-id-macro",
+    "-Wno-shorten-64-to-32",
+    "-Wno-sign-conversion",
+    "-Wno-switch-enum",
+    "-Wno-thread-safety-negative",
+    "-Wno-undef",
+    "-Wno-unused-macros",
+    "-Wno-weak-vtables",
+    # flags below are also controlled by -Wconversion which is disabled
+    "-Wbitfield-enum-conversion",
+    "-Wbool-conversion",
+    "-Wconstant-conversion",
+    "-Wenum-conversion",
+    "-Wint-conversion",
+    "-Wliteral-conversion",
+    "-Wnon-literal-null-conversion",
+    "-Wnull-conversion",
+    "-Wobjc-literal-conversion",
+    "-Wstring-conversion",
+]
+
+LLVM_TEST_FLAGS = [
+    "-Wno-c99-extensions",
+    "-Wno-missing-noreturn",
+    "-Wno-missing-prototypes",
+    "-Wno-null-conversion",
+    "-Wno-shadow",
+    "-Wno-shift-sign-overflow",
+    "-Wno-sign-compare",
+    "-Wno-unused-function",
+    "-Wno-unused-member-function",
+    "-Wno-unused-parameter",
+    "-Wno-unused-private-field",
+    "-Wno-unused-template",
+    "-Wno-used-but-marked-unused",
+    "-Wno-zero-as-null-pointer-constant",
+]
+
+MSVC_FLAGS = [
+    "/W3",
+    "/WX",
+    "/wd4005",  # macro-redefinition
+    "/wd4068",  # unknown pragma
+    "/wd4244",  # conversion from 'type1' to 'type2', possible loss of data
+    "/wd4267",  #
conversion from 'size_t' to 'type', possible loss of data + "/wd4800", # forcing value to bool 'true' or 'false' (performance warning) + "/DWIN32_LEAN_AND_MEAN", # Don't bloat namespace with incompatible winsock versions. +] + +MSVC_TEST_FLAGS = [ + "/wd4018", # signed/unsigned mismatch + "/wd4101", # unreferenced local variable + "/wd4503", # decorated name length exceeded, name was truncated +] + +def _qualify_flags(scope, flags): + return [scope + x for x in flags] + +HYBRID_FLAGS = _qualify_flags("-Xgcc-only=", GCC_FLAGS) + _qualify_flags("-Xclang-only=", LLVM_FLAGS) +HYBRID_TEST_FLAGS = _qualify_flags("-Xgcc-only=", GCC_TEST_FLAGS) + _qualify_flags("-Xclang-only=", LLVM_TEST_FLAGS) + +# /Wall with msvc includes unhelpful warnings such as C4711, C4710, ... +ABSL_DEFAULT_COPTS = select({ + "//absl:windows": MSVC_FLAGS, + "//absl:llvm_warnings": LLVM_FLAGS, + "//conditions:default": GCC_FLAGS, +}) + +# in absence of modules (--compiler=gcc or -c opt), cc_tests leak their copts +# to their (included header) dependencies and fail to build outside absl +ABSL_TEST_COPTS = ABSL_DEFAULT_COPTS + select({ + "//absl:windows": MSVC_TEST_FLAGS, + "//absl:llvm_warnings": LLVM_TEST_FLAGS, + "//conditions:default": GCC_TEST_FLAGS, +}) + +ABSL_EXCEPTIONS_FLAG = select({ + "//absl:windows": ["/U_HAS_EXCEPTIONS", "/D_HAS_EXCEPTIONS=1", "/EHsc"], + "//conditions:default": ["-fexceptions"], +}) diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel new file mode 100644 index 000000000..84acf9f99 --- /dev/null +++ b/absl/debugging/BUILD.bazel @@ -0,0 +1,170 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load( + "//absl:copts.bzl", + "ABSL_DEFAULT_COPTS", +) + +package( + default_visibility = ["//visibility:public"], +) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "stacktrace", + srcs = [ + "stacktrace.cc", + ], + hdrs = ["stacktrace.h"], + copts = ABSL_DEFAULT_COPTS, + deps = [ + ":debugging_internal", + "//absl/base", + "//absl/base:core_headers", + ], +) + +cc_library( + name = "debugging_internal", + srcs = [ + "internal/address_is_readable.cc", + "internal/elf_mem_image.cc", + "internal/vdso_support.cc", + ], + hdrs = [ + "internal/address_is_readable.h", + "internal/elf_mem_image.h", + "internal/stacktrace_aarch64-inl.inc", + "internal/stacktrace_arm-inl.inc", + "internal/stacktrace_config.h", + "internal/stacktrace_generic-inl.inc", + "internal/stacktrace_libunwind-inl.inc", + "internal/stacktrace_powerpc-inl.inc", + "internal/stacktrace_unimplemented-inl.inc", + "internal/stacktrace_win32-inl.inc", + "internal/stacktrace_x86-inl.inc", + "internal/vdso_support.h", + ], + copts = ABSL_DEFAULT_COPTS, + deps = [ + "//absl/base", + "//absl/base:dynamic_annotations", + "//absl/base:core_headers", + ], +) + +cc_library( + name = "leak_check", + srcs = select({ + # The leak checking interface depends on weak function + # declarations that may not necessarily have definitions. 
+ # Windows doesn't support this, and ios requires + # guaranteed definitions for weak symbols. + "//absl:ios": [], + "//absl:windows": [], + "//conditions:default": [ + "leak_check.cc", + ], + }), + hdrs = select({ + "//absl:ios": [], + "//absl:windows": [], + "//conditions:default": ["leak_check.h"], + }), + deps = ["//absl/base:core_headers"], +) + +# Adding a dependency to leak_check_disable will disable +# sanitizer leak checking (asan/lsan) in a test without +# the need to mess around with build features. +cc_library( + name = "leak_check_disable", + srcs = ["leak_check_disable.cc"], + linkstatic = 1, + alwayslink = 1, +) + +# These targets exists for use in tests only, explicitly configuring the +# LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan. +ABSL_LSAN_LINKOPTS = select({ + "//absl:llvm_compiler": ["-fsanitize=leak"], + "//conditions:default": [], +}) + +cc_library( + name = "leak_check_api_enabled_for_testing", + testonly = 1, + srcs = ["leak_check.cc"], + hdrs = ["leak_check.h"], + copts = select({ + "//absl:llvm_compiler": ["-DLEAK_SANITIZER"], + "//conditions:default": [], + }), + visibility = ["//visibility:private"], +) + +cc_library( + name = "leak_check_api_disabled_for_testing", + testonly = 1, + srcs = ["leak_check.cc"], + hdrs = ["leak_check.h"], + copts = ["-ULEAK_SANITIZER"], + visibility = ["//visibility:private"], +) + +cc_test( + name = "leak_check_test", + srcs = ["leak_check_test.cc"], + copts = select({ + "//absl:llvm_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"], + "//conditions:default": [], + }), + linkopts = ABSL_LSAN_LINKOPTS, + deps = [ + ":leak_check_api_enabled_for_testing", + "//absl/base", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "leak_check_no_lsan_test", + srcs = ["leak_check_test.cc"], + copts = ["-UABSL_EXPECT_LEAK_SANITIZER"], + deps = [ + ":leak_check_api_disabled_for_testing", + "//absl/base", # for raw_logging + "@com_google_googletest//:gtest_main", + ], +) + +# Test that leak checking is skipped when lsan is enabled but +# ":leak_check_disable" is linked in. +# +# This test should fail in the absence of a dependency on ":leak_check_disable" +cc_test( + name = "disabled_leak_check_test", + srcs = ["leak_check_fail_test.cc"], + linkopts = ABSL_LSAN_LINKOPTS, + deps = [ + ":leak_check_api_enabled_for_testing", + ":leak_check_disable", + "//absl/base", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/absl/debugging/internal/address_is_readable.cc b/absl/debugging/internal/address_is_readable.cc new file mode 100644 index 000000000..037ea54c3 --- /dev/null +++ b/absl/debugging/internal/address_is_readable.cc @@ -0,0 +1,134 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// base::AddressIsReadable() probes an address to see whether it is readable, +// without faulting. 
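+// [Editor's note: illustrative usage, not part of the original commit.]
+// A stack unwinder might probe a candidate frame pointer like so:
+//   if (!absl::debug_internal::AddressIsReadable(frame_pointer)) {
+//     break;  // stop walking a corrupt or truncated stack
+//   }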
+
+#include "absl/debugging/internal/address_is_readable.h"
+
+#if !defined(__linux__) || defined(__ANDROID__)
+
+namespace absl {
+namespace debug_internal {
+
+// On platforms other than Linux, just return true.
+bool AddressIsReadable(const void* /* addr */) { return true; }
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#else
+
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <cerrno>
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+namespace debug_internal {
+
+// Pack a pid and two file descriptors into a 64-bit word,
+// using 16, 24, and 24 bits for each respectively.
+static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
+  ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
+                 "fd out of range");
+  return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
+}
+
+// Unpack x into a pid and two file descriptors, where x was created with
+// Pack().
+static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
+  *pid = x >> 48;
+  *read_fd = (x >> 24) & 0xffffff;
+  *write_fd = x & 0xffffff;
+}
+
+// Return whether the byte at *addr is readable, without faulting.
+// Saves and restores errno. Returns true on systems where
+// unimplemented.
+// This is a namespace-scoped variable for correct zero-initialization.
+static std::atomic<uint64_t> pid_and_fds;  // initially 0, an invalid pid.
+
+bool AddressIsReadable(const void *addr) {
+  int save_errno = errno;
+  // We test whether a byte is readable by using write(). Normally, this would
+  // be done via a cached file descriptor to /dev/null, but linux fails to
+  // check whether the byte is readable when the destination is /dev/null, so
+  // we use a cached pipe. We store the pid of the process that created the
+  // pipe to handle the case where a process forks, and the child closes all
+  // the file descriptors and then calls this routine. This is not perfect:
+  // the child could use the routine, then close all file descriptors and then
+  // use this routine again. But the likely use of this routine is when
+  // crashing, to test the validity of pages when dumping the stack. Beware
+  // that we may leak file descriptors, but we're unlikely to leak many.
+  int bytes_written;
+  int current_pid = getpid() & 0xffff;  // we use only the low order 16 bits
+  do {  // until we do not get EBADF trying to use file descriptors
+    int pid;
+    int read_fd;
+    int write_fd;
+    uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+    Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+    while (current_pid != pid) {
+      int p[2];
+      // new pipe
+      if (pipe(p) != 0) {
+        ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
+      }
+      fcntl(p[0], F_SETFD, FD_CLOEXEC);
+      fcntl(p[1], F_SETFD, FD_CLOEXEC);
+      uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
+      if (pid_and_fds.compare_exchange_strong(
+              local_pid_and_fds, new_pid_and_fds, std::memory_order_relaxed,
+              std::memory_order_relaxed)) {
+        local_pid_and_fds = new_pid_and_fds;  // fds exposed to other threads
+      } else {  // fds not exposed to other threads; we can close them.
+        close(p[0]);
+        close(p[1]);
+        local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
+      }
+      Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
+    }
+    errno = 0;
+    // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
+    // and other checkers from complaining about accesses to arbitrary
+    // memory.
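+    // [Editor's note: not part of the original commit.] write(2) is the
+    // probe: the kernel must read one byte from `addr` to copy it into the
+    // pipe, and it reports -1 with errno == EFAULT, rather than faulting,
+    // when that byte is unreadable; bytes_written == 1 therefore certifies
+    // readability.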
+    do {
+      bytes_written = syscall(SYS_write, write_fd, addr, 1);
+    } while (bytes_written == -1 && errno == EINTR);
+    if (bytes_written == 1) {  // remove the byte from the pipe
+      char c;
+      while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
+      }
+    }
+    if (errno == EBADF) {  // Descriptors invalid.
+      // If pid_and_fds contains the problematic file descriptors we just used,
+      // this call will forget them, and the loop will try again.
+      pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
+                                          std::memory_order_relaxed,
+                                          std::memory_order_relaxed);
+    }
+  } while (errno == EBADF);
+  errno = save_errno;
+  return bytes_written == 1;
+}
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif
diff --git a/absl/debugging/internal/address_is_readable.h b/absl/debugging/internal/address_is_readable.h
new file mode 100644
index 000000000..a8b329235
--- /dev/null
+++ b/absl/debugging/internal/address_is_readable.h
@@ -0,0 +1,29 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+
+namespace absl {
+namespace debug_internal {
+
+// Return whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.
+bool AddressIsReadable(const void *addr);
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
new file mode 100644
index 000000000..f6c6bc07d
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -0,0 +1,397 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in an in-memory Elf image.
+//
+
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE  // defined in elf_mem_image.h
+
+#include <string.h>
+#include <cassert>
+#include <cstddef>
+#include "absl/base/internal/raw_logging.h"
+
+// From binutils/include/elf/common.h (this doesn't appear to be documented
+// anywhere else).
+//
+//   /* This flag appears in a Versym structure. It means that the symbol
+//      is hidden, and is only visible with an explicit version number.
+//      This is a GNU extension. */
+// #define VERSYM_HIDDEN 0x8000
+//
+//   /* This is the mask for the rest of the Versym information. */
+//      */
+// #define VERSYM_VERSION 0x7fff
+
+#define VERSYM_VERSION 0x7fff
+
+namespace absl {
+namespace debug_internal {
+
+namespace {
+
+#if __WORDSIZE == 32
+const int kElfClass = ELFCLASS32;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
+#elif __WORDSIZE == 64
+const int kElfClass = ELFCLASS64;
+int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
+int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
+#else
+const int kElfClass = -1;
+int ElfBind(const ElfW(Sym) *) {
+  ABSL_RAW_LOG(FATAL, "Unexpected word size");
+  return 0;
+}
+int ElfType(const ElfW(Sym) *) {
+  ABSL_RAW_LOG(FATAL, "Unexpected word size");
+  return 0;
+}
+#endif
+
+// Extract an element from one of the ELF tables and cast it to the desired
+// type.  This is just simple arithmetic and a glorified cast.
+// Callers are responsible for bounds checking.
+template <typename T>
+const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
+                         ElfW(Word) element_size, size_t index) {
+  return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+                                    + table_offset
+                                    + index * element_size);
+}
+
+}  // namespace
+
+const void *const ElfMemImage::kInvalidBase =
+    reinterpret_cast<const void *>(~0L);
+
+ElfMemImage::ElfMemImage(const void *base) {
+  ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
+  Init(base);
+}
+
+int ElfMemImage::GetNumSymbols() const {
+  if (!hash_) {
+    return 0;
+  }
+  // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
+  return hash_[1];
+}
+
+const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+  ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+  return dynsym_ + index;
+}
+
+const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+  ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
+  return versym_ + index;
+}
+
+const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
+  ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
+  return GetTableElement<ElfW(Phdr)>(ehdr_,
+                                     ehdr_->e_phoff,
+                                     ehdr_->e_phentsize,
+                                     index);
+}
+
+const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
+  ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+  return dynstr_ + offset;
+}
+
+const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
+  if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
+    // Symbol corresponds to a "special" (e.g. SHN_ABS) section.
+    return reinterpret_cast<const void *>(sym->st_value);
+  }
+  ABSL_RAW_CHECK(link_base_ < sym->st_value, "symbol out of range");
+  return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
+}
+
+const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
+  ABSL_RAW_CHECK(0 <= index && static_cast<size_t>(index) <= verdefnum_,
+                 "index out of range");
+  const ElfW(Verdef) *version_definition = verdef_;
+  while (version_definition->vd_ndx < index && version_definition->vd_next) {
+    const char *const version_definition_as_char =
+        reinterpret_cast<const char *>(version_definition);
+    version_definition =
+        reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
+                                               version_definition->vd_next);
+  }
+  return version_definition->vd_ndx == index ? version_definition : nullptr;
+}
+
+const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
+    const ElfW(Verdef) *verdef) const {
+  return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
+}
+
+const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
+  ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
+  return dynstr_ + offset;
+}
+
+void ElfMemImage::Init(const void *base) {
+  ehdr_      = nullptr;
+  dynsym_    = nullptr;
+  dynstr_    = nullptr;
+  versym_    = nullptr;
+  verdef_    = nullptr;
+  hash_      = nullptr;
+  strsize_   = 0;
+  verdefnum_ = 0;
+  link_base_ = ~0L;  // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+  if (!base) {
+    return;
+  }
+  const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
+  // Fake VDSO has low bit set.
+  const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
+  base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
+  const char *const base_as_char = reinterpret_cast<const char *>(base);
+  if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
+      base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
+    assert(false);
+    return;
+  }
+  int elf_class = base_as_char[EI_CLASS];
+  if (elf_class != kElfClass) {
+    assert(false);
+    return;
+  }
+  switch (base_as_char[EI_DATA]) {
+    case ELFDATA2LSB: {
+      if (__LITTLE_ENDIAN != __BYTE_ORDER) {
+        assert(false);
+        return;
+      }
+      break;
+    }
+    case ELFDATA2MSB: {
+      if (__BIG_ENDIAN != __BYTE_ORDER) {
+        assert(false);
+        return;
+      }
+      break;
+    }
+    default: {
+      assert(false);
+      return;
+    }
+  }
+
+  ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
+  const ElfW(Phdr) *dynamic_program_header = nullptr;
+  for (int i = 0; i < ehdr_->e_phnum; ++i) {
+    const ElfW(Phdr) *const program_header = GetPhdr(i);
+    switch (program_header->p_type) {
+      case PT_LOAD:
+        if (!~link_base_) {
+          link_base_ = program_header->p_vaddr;
+        }
+        break;
+      case PT_DYNAMIC:
+        dynamic_program_header = program_header;
+        break;
+    }
+  }
+  if (!~link_base_ || !dynamic_program_header) {
+    assert(false);
+    // Mark this image as not present.  Cannot recurse infinitely.
+    Init(nullptr);
+    return;
+  }
+  ptrdiff_t relocation =
+      base_as_char - reinterpret_cast<const char *>(link_base_);
+  ElfW(Dyn) *dynamic_entry =
+      reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
+                                    relocation);
+  for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
+    ElfW(Xword) value = dynamic_entry->d_un.d_val;
+    if (fake_vdso) {
+      // A complication: in the real VDSO, dynamic entries are not relocated
+      // (it wasn't loaded by a dynamic loader).  But when testing with a
+      // "fake" dlopen()ed vdso library, the loader relocates some (but
+      // not all!) of them before we get here.
+      if (dynamic_entry->d_tag == DT_VERDEF) {
+        // The only dynamic entry (of the ones we care about) the libc-2.3.6
+        // loader doesn't relocate.
+        value += relocation;
+      }
+    } else {
+      // Real VDSO.  Everything needs to be relocated.
+      value += relocation;
+    }
+    switch (dynamic_entry->d_tag) {
+      case DT_HASH:
+        hash_ = reinterpret_cast<ElfW(Word) *>(value);
+        break;
+      case DT_SYMTAB:
+        dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
+        break;
+      case DT_STRTAB:
+        dynstr_ = reinterpret_cast<const char *>(value);
+        break;
+      case DT_VERSYM:
+        versym_ = reinterpret_cast<ElfW(Versym) *>(value);
+        break;
+      case DT_VERDEF:
+        verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
+        break;
+      case DT_VERDEFNUM:
+        verdefnum_ = dynamic_entry->d_un.d_val;
+        break;
+      case DT_STRSZ:
+        strsize_ = dynamic_entry->d_un.d_val;
+        break;
+      default:
+        // Unrecognized entries explicitly ignored.
+        break;
+    }
+  }
+  if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+      !verdef_ || !verdefnum_ || !strsize_) {
+    assert(false);  // invalid VDSO
+    // Mark this image as not present.  Cannot recurse infinitely.
+    Init(nullptr);
+    return;
+  }
+}
+
+bool ElfMemImage::LookupSymbol(const char *name,
+                               const char *version,
+                               int type,
+                               SymbolInfo *info_out) const {
+  for (const SymbolInfo& info : *this) {
+    if (strcmp(info.name, name) == 0 && strcmp(info.version, version) == 0 &&
+        ElfType(info.symbol) == type) {
+      if (info_out) {
+        *info_out = info;
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ElfMemImage::LookupSymbolByAddress(const void *address,
+                                        SymbolInfo *info_out) const {
+  for (const SymbolInfo& info : *this) {
+    const char *const symbol_start =
+        reinterpret_cast<const char *>(info.address);
+    const char *const symbol_end = symbol_start + info.symbol->st_size;
+    if (symbol_start <= address && address < symbol_end) {
+      if (info_out) {
+        // Client wants to know details for that symbol (the usual case).
+        if (ElfBind(info.symbol) == STB_GLOBAL) {
+          // Strong symbol; just return it.
+          *info_out = info;
+          return true;
+        } else {
+          // Weak or local.  Record it, but keep looking for a strong one.
+          *info_out = info;
+        }
+      } else {
+        // Client only cares if there is an overlapping symbol.
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
+    : index_(index), image_(image) {
+}
+
+const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
+  return &info_;
+}
+
+const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
+  return info_;
+}
+
+bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
+  return this->image_ == rhs.image_ && this->index_ == rhs.index_;
+}
+
+bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
+  return !(*this == rhs);
+}
+
+ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
+  this->Update(1);
+  return *this;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::begin() const {
+  SymbolIterator it(this, 0);
+  it.Update(0);
+  return it;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::end() const {
+  return SymbolIterator(this, GetNumSymbols());
+}
+
+void ElfMemImage::SymbolIterator::Update(int increment) {
+  const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
+  ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
+  if (!image->IsPresent()) {
+    return;
+  }
+  index_ += increment;
+  if (index_ >= image->GetNumSymbols()) {
+    index_ = image->GetNumSymbols();
+    return;
+  }
+  const ElfW(Sym)    *symbol = image->GetDynsym(index_);
+  const ElfW(Versym) *version_symbol = image->GetVersym(index_);
+  ABSL_RAW_CHECK(symbol && version_symbol, "");
+  const char *const symbol_name = image->GetDynstr(symbol->st_name);
+  const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+  const ElfW(Verdef) *version_definition = nullptr;
+  const char *version_name = "";
+  if (symbol->st_shndx == SHN_UNDEF) {
+    // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
+    // version_index could well be greater than verdefnum_, so calling
+    // GetVerdef(version_index) may trigger an assertion.
+  } else {
+    version_definition = image->GetVerdef(version_index);
+  }
+  if (version_definition) {
+    // I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
+    // an optional 2nd if the version has a parent.
+    ABSL_RAW_CHECK(
+        version_definition->vd_cnt == 1 || version_definition->vd_cnt == 2,
+        "wrong number of entries");
+    const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
+    version_name = image->GetVerstr(version_aux->vda_name);
+  }
+  info_.name    = symbol_name;
+  info_.version = version_name;
+  info_.address = image->GetSymAddr(symbol);
+  info_.symbol  = symbol;
+}
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
new file mode 100644
index 000000000..7f3dbb971
--- /dev/null
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Allow dynamic symbol lookup for in-memory Elf images.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
+
+// Including this will define the __GLIBC__ macro if glibc is being
+// used.
+#include <climits>
+
+// Maybe one day we can rewrite this file not to require the elf
+// symbol extensions in glibc, but for right now we need them.
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
+#endif
+
+#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
+    !defined(__asmjs__)
+#define ABSL_HAVE_ELF_MEM_IMAGE 1
+#endif
+
+#if ABSL_HAVE_ELF_MEM_IMAGE
+
+#include <link.h>  // for ElfW
+
+namespace absl {
+namespace debug_internal {
+
+// An in-memory ELF image (may not exist on disk).
+class ElfMemImage {
+ public:
+  // Sentinel: there could never be an elf image at this address.
+  static const void *const kInvalidBase;
+
+  // Information about a single vdso symbol.
+  // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
+  // Do not free() them or modify through them.
+  struct SymbolInfo {
+    const char *name;         // E.g. "__vdso_getcpu"
+    const char *version;      // E.g. "LINUX_2.6", could be ""
+                              // for unversioned symbol.
+    const void *address;      // Relocated symbol address.
+    const ElfW(Sym) *symbol;  // Symbol in the dynamic symbol table.
+  };
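+
+  // Illustrative usage sketch (not part of this interface): given some
+  // hypothetical readable ELF image base `vdso_base`, the dynamic symbols
+  // can be enumerated with the iterator declared below, e.g.
+  //
+  //   absl::debug_internal::ElfMemImage image(vdso_base);
+  //   if (image.IsPresent()) {
+  //     for (const auto &info : image) {
+  //       // info.name, info.version, info.address, info.symbol
+  //     }
+  //   }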
+
+  // Supports iteration over all dynamic symbols.
+  class SymbolIterator {
+   public:
+    friend class ElfMemImage;
+    const SymbolInfo *operator->() const;
+    const SymbolInfo &operator*() const;
+    SymbolIterator& operator++();
+    bool operator!=(const SymbolIterator &rhs) const;
+    bool operator==(const SymbolIterator &rhs) const;
+   private:
+    SymbolIterator(const void *const image, int index);
+    void Update(int incr);
+    SymbolInfo info_;
+    int index_;
+    const void *const image_;
+  };
+
+  explicit ElfMemImage(const void *base);
+  void Init(const void *base);
+  bool IsPresent() const { return ehdr_ != nullptr; }
+  const ElfW(Phdr)* GetPhdr(int index) const;
+  const ElfW(Sym)* GetDynsym(int index) const;
+  const ElfW(Versym)* GetVersym(int index) const;
+  const ElfW(Verdef)* GetVerdef(int index) const;
+  const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
+  const char* GetDynstr(ElfW(Word) offset) const;
+  const void* GetSymAddr(const ElfW(Sym) *sym) const;
+  const char* GetVerstr(ElfW(Word) offset) const;
+  int GetNumSymbols() const;
+
+  SymbolIterator begin() const;
+  SymbolIterator end() const;
+
+  // Look up versioned dynamic symbol in the image.
+  // Returns false if image is not present, or doesn't contain given
+  // symbol/version/type combination.
+  // If info_out is non-null, additional details are filled in.
+  bool LookupSymbol(const char *name, const char *version,
+                    int symbol_type, SymbolInfo *info_out) const;
+
+  // Find info about symbol (if any) which overlaps given address.
+  // Returns true if symbol was found; false if image isn't present
+  // or doesn't have a symbol overlapping given address.
+  // If info_out is non-null, additional details are filled in.
+  bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ private:
+  const ElfW(Ehdr) *ehdr_;
+  const ElfW(Sym) *dynsym_;
+  const ElfW(Versym) *versym_;
+  const ElfW(Verdef) *verdef_;
+  const ElfW(Word) *hash_;
+  const char *dynstr_;
+  size_t strsize_;
+  size_t verdefnum_;
+  ElfW(Addr) link_base_;  // Link-time base (p_vaddr of first PT_LOAD).
+};
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif  // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
new file mode 100644
index 000000000..c125ea290
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -0,0 +1,181 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
+
+// Generate stack tracer for aarch64
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+
+static const uintptr_t kUnknownFrameSize = 0;
+
+#if defined(__linux__)
+// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
+static const unsigned char* GetKernelRtSigreturnAddress() {
+  constexpr uintptr_t kImpossibleAddress = 1;
+  static std::atomic<uintptr_t> memoized{kImpossibleAddress};
+  uintptr_t address = memoized.load(std::memory_order_relaxed);
+  if (address != kImpossibleAddress) {
+    return reinterpret_cast<const unsigned char*>(address);
+  }
+
+  address = reinterpret_cast<uintptr_t>(nullptr);
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+  absl::debug_internal::VDSOSupport vdso;
+  if (vdso.IsPresent()) {
+    absl::debug_internal::VDSOSupport::SymbolInfo symbol_info;
+    if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
+                           &symbol_info) ||
+        symbol_info.address == nullptr) {
+      // Unexpected: VDSO is present, yet the expected symbol is missing
+      // or null.
+      assert(false && "VDSO is present, but doesn't have expected symbol");
+    } else {
+      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
+          kImpossibleAddress) {
+        address = reinterpret_cast<uintptr_t>(symbol_info.address);
+      } else {
+        assert(false && "VDSO returned invalid address");
+      }
+    }
+  }
+#endif
+
+  memoized.store(address, std::memory_order_relaxed);
+  return reinterpret_cast<const unsigned char*>(address);
+}
+#endif  // __linux__
+
+// Compute the size of a stack frame in [low..high).  We assume that
+// low < high.  Returns kUnknownFrameSize if it is not.
+template<typename T>
+static inline uintptr_t ComputeStackFrameSize(const T* low,
+                                              const T* high) {
+  const char* low_char_ptr = reinterpret_cast<const char *>(low);
+  const char* high_char_ptr = reinterpret_cast<const char *>(high);
+  return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
+  bool check_frame_size = true;
+
+#if defined(__linux__)
+  if (WITH_CONTEXT && uc != nullptr) {
+    // Check to see if next frame's return address is __kernel_rt_sigreturn.
+    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
+      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+      // old_frame_pointer[0] is not suitable for unwinding, look at
+      // ucontext to discover frame pointer before signal.
+      void **const pre_signal_frame_pointer =
+          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+
+      // Check that the alleged frame pointer is actually readable.  This is
+      // to prevent a "double fault" in case we hit the first fault due to
+      // e.g. stack corruption.
+      if (!absl::debug_internal::AddressIsReadable(
+              pre_signal_frame_pointer))
+        return nullptr;
+
+      // Alleged frame pointer is readable, use it for further unwinding.
+      new_frame_pointer = pre_signal_frame_pointer;
+
+      // Skip the frame size check if we return from a signal.  We may be
+      // using an alternate stack for signals.
+      check_frame_size = false;
+    }
+  }
+#endif
+
+  // The aarch64 ABI requires the stack pointer to be 16-byte-aligned.
+  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
+    return nullptr;
+
+  // Check frame size.  In strict mode, we assume frames to be under
+  // 100,000 bytes.  In non-strict mode, we relax the limit to 1MB.
+  if (check_frame_size) {
+    const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+    const uintptr_t frame_size =
+        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+    if (frame_size == kUnknownFrameSize || frame_size > max_size)
+      return nullptr;
+  }
+
+  return new_frame_pointer;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+#ifdef __GNUC__
+  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+  skip_count++;    // Skip the frame for this function.
+  int n = 0;
+
+  // The frame pointer points to the low address of a frame.  The first
+  // 64-bit word of a frame points to the next frame up the call chain, which
+  // normally is just after the high address of the current frame.  The
+  // second word of a frame contains the return address of the caller.  To
+  // find a pc value associated with the current frame, we need to go down a
+  // level in the call chain.  So we remember the return address of the last
+  // frame seen.  This does not work for the first stack frame, which belongs
+  // to UnwindImpl() itself, but we skip the frame for UnwindImpl() anyway.
+  void* prev_return_address = nullptr;
+
+  while (frame_pointer && n < max_depth) {
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).
+    // Use the non-strict unwinding rules to produce a stack trace
+    // that is as complete as possible (even if it contains a few bogus
+    // entries in some rare cases).
+    void **next_frame_pointer =
+        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = prev_return_address;
+      if (IS_STACK_FRAMES) {
+        sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+      }
+      n++;
+    }
+    prev_return_address = frame_pointer[1];
+    frame_pointer = next_frame_pointer;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the max of frames we are willing to
+    // count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 200;
+    int j = 0;
+    for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+      frame_pointer =
+          NextStackFrame<false, IS_WITH_CONTEXT>(frame_pointer, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
diff --git a/absl/debugging/internal/stacktrace_arm-inl.inc b/absl/debugging/internal/stacktrace_arm-inl.inc
new file mode 100644
index 000000000..566b6d341
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -0,0 +1,115 @@
+// Copyright 2011 and onwards Google Inc.
+// All rights reserved.
+//
+// Author: Doug Kwan
+// This is inspired by Craig Silverstein's PowerPC stacktrace code.
+//
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
+
+#include <cstdint>
+
+#include "absl/debugging/stacktrace.h"
+
+// WARNING:
+// This only works if all your code is in either ARM or THUMB mode.  With
+// interworking, the frame pointer of the caller can either be in r11 (ARM
+// mode) or r7 (THUMB mode).  A callee only saves the frame pointer of its
+// mode in a fixed location on its stack frame.  If the caller is a different
+// mode, there is no easy way to find the frame pointer.  It can either be
+// still in the designated register or saved on the stack along with other
+// callee-saved registers.
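+
+// Illustrative sketch (assuming the all-ARM/all-THUMB caveat above holds):
+// each unwind step below simply reloads the caller's frame pointer from the
+// word just below the current one, i.e.
+//
+//   void **caller_fp = (void **)old_sp[-1];
+//
+// and then sanity-checks the result before trusting it.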
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return nullptr if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+  void **new_sp = (void**) old_sp[-1];
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus.
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return nullptr;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return nullptr;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+  }
+  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
+  return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+#ifdef __GNUC__
+void StacktraceArmDummyFunction() __attribute__((noinline));
+void StacktraceArmDummyFunction() { __asm__ volatile(""); }
+#else
+# error StacktraceArmDummyFunction() needs to be ported to this platform.
+#endif
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void * /* ucp */, int *min_dropped_frames) {
+#ifdef __GNUC__
+  void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+  // On ARM, the return address is stored in the link register (r14).
+  // This is not saved on the stack frame of a leaf function.  To
+  // simplify code that reads return addresses, we call a dummy
+  // function so that the return address of this function is also
+  // stored in the stack frame.  This works at least for gcc.
+  StacktraceArmDummyFunction();
+
+  int n = 0;
+  while (sp && n < max_depth) {
+    // The absl::GetStackFrames routine is called when we are in some
+    // informational context (the failure signal handler for example).
+    // Use the non-strict unwinding rules to produce a stack trace
+    // that is as complete as possible (even if it contains a few bogus
+    // entries in some rare cases).
+    void **next_sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
+
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = *sp;
+
+      if (IS_STACK_FRAMES) {
+        if (next_sp > sp) {
+          sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+    sp = next_sp;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the max of frames we are willing to
+    // count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 200;
+    int j = 0;
+    for (; sp != nullptr && j < kMaxUnwind; j++) {
+      sp = NextStackFrame<false>(sp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
new file mode 100644
index 000000000..c0df5bb06
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+ * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing
+ * actual unwinder implementation.
+ * This header is "private" to stacktrace.cc.
+ * DO NOT include it into any other files.
+*/
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
+
+// First, test platforms which only support a stub.
+#if ABSL_STACKTRACE_INL_HEADER
+#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
+#elif defined(__native_client__) || defined(__APPLE__) || \
+    defined(__ANDROID__) || defined(__myriad2__) || defined(__asmjs__) || \
+    defined(__Fuchsia__) || defined(__GENCLAVE__) || \
+    defined(GOOGLE_UNSUPPORTED_OS_HERCULES)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+
+// Next, test for Mips and Windows.
+// TODO(marmstrong): http://b/21334018: Mips case, remove the check for
+// ABSL_STACKTRACE_INL_HEADER.
+#elif defined(__mips__) && !defined(ABSL_STACKTRACE_INL_HEADER)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+#elif defined(_WIN32)  // windows
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_win32-inl.inc"
+
+// Finally, test NO_FRAME_POINTER.
+#elif !defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_x86-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_powerpc-inl.inc"
+# elif defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_aarch64-inl.inc"
+# elif defined(__arm__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_arm-inl.inc"
+# endif
+#else  // defined(NO_FRAME_POINTER)
+# if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
+# elif defined(__ppc__) || defined(__PPC__)
+// Use glibc's backtrace.
+#define ABSL_STACKTRACE_INL_HEADER \
+    "absl/debugging/internal/stacktrace_generic-inl.inc"
+# elif defined(__arm__)
+#   error stacktrace without frame pointer is not supported on ARM
+# endif
+#endif  // NO_FRAME_POINTER
+
+#if !defined(ABSL_STACKTRACE_INL_HEADER)
+#error Not supported yet
+#endif
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
diff --git a/absl/debugging/internal/stacktrace_generic-inl.inc b/absl/debugging/internal/stacktrace_generic-inl.inc
new file mode 100644
index 000000000..de4881e36
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_generic-inl.inc
@@ -0,0 +1,51 @@
+// Copyright 2000 - 2007 Google Inc.
+// All rights reserved.
+//
+// Author: Sanjay Ghemawat
+//
+// Portable implementation - just use glibc
+//
+// Note:  The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
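+//
+// For reference, a minimal stand-alone use of the glibc API wrapped below
+// looks like the following sketch (illustrative only; DumpToStderr is a
+// hypothetical name, and callers should normally go through
+// absl::GetStackTrace):
+//
+//   #include <execinfo.h>
+//   void DumpToStderr() {
+//     void *frames[64];
+//     const int n = backtrace(frames, 64);   // may call malloc internally
+//     backtrace_symbols_fd(frames, n, /*fd=*/2);
+//   }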
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
+
+#include <execinfo.h>
+#include <cstring>
+
+#include "absl/debugging/stacktrace.h"
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  static const int kStackLength = 64;
+  void * stack[kStackLength];
+  int size;
+
+  size = backtrace(stack, kStackLength);
+  skip_count++;  // we want to skip the current frame as well
+  int result_count = size - skip_count;
+  if (result_count < 0)
+    result_count = 0;
+  if (result_count > max_depth)
+    result_count = max_depth;
+  for (int i = 0; i < result_count; i++)
+    result[i] = stack[i + skip_count];
+
+  if (IS_STACK_FRAMES) {
+    // No implementation for finding out the stack frame sizes yet.
+    memset(sizes, 0, sizeof(*sizes) * result_count);
+  }
+  if (min_dropped_frames != nullptr) {
+    if (size - skip_count - max_depth > 0) {
+      *min_dropped_frames = size - skip_count - max_depth;
+    } else {
+      *min_dropped_frames = 0;
+    }
+  }
+
+  return result_count;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
diff --git a/absl/debugging/internal/stacktrace_libunwind-inl.inc b/absl/debugging/internal/stacktrace_libunwind-inl.inc
new file mode 100644
index 000000000..e9c2d26a5
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_libunwind-inl.inc
@@ -0,0 +1,128 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
+
+// We only need local unwinder.
+#define UNW_LOCAL_ONLY
+
+extern "C" {
+#include "third_party/libunwind/include/libunwind.h"
+}
+#include "absl/debugging/stacktrace.h"
+
+#include <cstring>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, because we don't block signals inside libunwind (which would be too
+// expensive: the two extra system calls per stack trace do matter here).
+// That can cause a self-deadlock (as in http://b/5722312).
+// Protect against such reentrant call by failing to get a stack trace.
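+//
+// For orientation, the core libunwind loop used below has this shape
+// (illustrative sketch; the real code also records frame sizes, honors
+// skip_count, and guards against reentrancy):
+//
+//   unw_context_t uc;
+//   unw_cursor_t cursor;
+//   unw_getcontext(&uc);
+//   unw_init_local(&cursor, &uc);
+//   do {
+//     unw_word_t ip;
+//     unw_get_reg(&cursor, UNW_REG_IP, &ip);  // pc of the current frame
+//   } while (unw_step(&cursor) > 0);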
+//
+// We use __thread here because the code here is extremely low level -- it is
+// called while collecting stack traces from within malloc and mmap, and thus
+// can not call anything which might call malloc or mmap itself.
+// In particular, using PerThread or STATIC_THREAD_LOCAL_POD
+// here will cause infinite recursion for at least dbg/piii builds with
+// crosstool-v12.
+static __thread int recursive;
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *, int *min_dropped_frames) {
+  if (recursive) {
+    return 0;
+  }
+  ++recursive;
+
+  int n = 0;
+  if (IS_STACK_FRAMES) {
+    void *ip;
+    unw_cursor_t cursor;
+    unw_context_t uc;
+    unw_word_t sp = 0, next_sp = 0;
+
+    unw_getcontext(&uc);
+    ABSL_RAW_CHECK(unw_init_local(&cursor, &uc) >= 0, "unw_init_local failed");
+    skip_count++;  // Do not include current frame
+
+    while (skip_count--) {
+      if (unw_step(&cursor) <= 0) {
+        goto out;
+      }
+      if (unw_get_reg(&cursor, UNW_REG_SP, &next_sp)) {
+        goto out;
+      }
+    }
+
+    while (n < max_depth) {
+      if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0) {
+        break;
+      }
+      sizes[n] = 0;
+      result[n++] = ip;
+      if (unw_step(&cursor) <= 0) {
+        break;
+      }
+      sp = next_sp;
+      if (unw_get_reg(&cursor, UNW_REG_SP, &next_sp) < 0) {
+        break;
+      }
+      sizes[n - 1] = next_sp - sp;
+    }
+    if (min_dropped_frames != nullptr) {
+      // Implementation detail: we clamp the max of frames we are willing to
+      // count, so as not to spend too much time in the loop below.
+      const int kMaxUnwind = 200;
+      int j = 0;
+      for (; j < kMaxUnwind; j++) {
+        if (unw_step(&cursor) < 0) {
+          break;
+        }
+      }
+      *min_dropped_frames = j;
+    }
+  } else {
+    skip_count++;  // Do not include current frame.
+    void **result_all = reinterpret_cast<void**>(
+        alloca(sizeof(void*) * (max_depth + skip_count)));
+    int rc = unw_backtrace(result_all, max_depth + skip_count);
+
+    if (rc > 0) {
+      // Tell MSan that result_all has been initialized. b/34965936.
+      ANNOTATE_MEMORY_IS_INITIALIZED(result_all, rc * sizeof(void*));
+    }
+
+    if (rc > skip_count) {
+      memcpy(result, &result_all[skip_count],
+             sizeof(void*) * (rc - skip_count));
+      n = rc - skip_count;
+    } else {
+      n = 0;
+    }
+
+    if (min_dropped_frames != nullptr) {
+      // Not implemented.
+      *min_dropped_frames = 0;
+    }
+  }
+
+ out:
+  --recursive;
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_LIBUNWIND_INL_H_
diff --git a/absl/debugging/internal/stacktrace_powerpc-inl.inc b/absl/debugging/internal/stacktrace_powerpc-inl.inc
new file mode 100644
index 000000000..0628b285f
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -0,0 +1,234 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace.  I'm guessing (hoping!) the code is much like
+// for x86.  For apple machines, at least, it seems to be; see
+// http://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
+// http://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
+// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
+
+#if defined(__linux__)
+#include <asm/ptrace.h>   // for PT_NIP.
+#include <ucontext.h>     // for ucontext_t
+#endif
+
+#include <unistd.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+
+// Given a stack pointer, return the saved link register value.
+// Note that this is the link register for a callee.
+static inline void *StacktracePowerPCGetLR(void **sp) {
+  // PowerPC has 3 main ABIs, which say where in the stack the
+  // Link Register is.  For DARWIN and AIX (used by apple and
+  // linux ppc64), it's in sp[2].  For SYSV (used by linux ppc),
+  // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+  return *(sp+2);
+#elif defined(_CALL_SYSV)
+  return *(sp+1);
+#elif defined(__APPLE__) || (defined(__linux__) && defined(__PPC64__))
+  // This check is in case the compiler doesn't define _CALL_AIX/etc.
+  return *(sp+2);
+#elif defined(__linux)
+  // This check is in case the compiler doesn't define _CALL_SYSV.
+  return *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void **NextStackFrame(void **old_sp, const void *uc) {
+  void **new_sp = (void **) *old_sp;
+  enum { kStackAlignment = 16 };
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus.
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return nullptr;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return nullptr;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+  }
+  if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
+
+#if defined(__linux__)
+  enum StackTraceKernelSymbolStatus {
+      kNotInitialized = 0, kAddressValid, kAddressInvalid };
+
+  if (IS_WITH_CONTEXT && uc != nullptr) {
+    static StackTraceKernelSymbolStatus kernel_symbol_status =
+        kNotInitialized;  // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_rt_sigtramp_rt64 can not
+    // possibly be there.
+    static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
+    if (kernel_symbol_status == kNotInitialized) {
+      absl::debug_internal::VDSOSupport vdso;
+      if (vdso.IsPresent()) {
+        absl::debug_internal::VDSOSupport::SymbolInfo
+            sigtramp_rt64_symbol_info;
+        if (!vdso.LookupSymbol(
+                "__kernel_sigtramp_rt64", "LINUX_2.6.15",
+                absl::debug_internal::VDSOSupport::kVDSOSymbolType,
+                &sigtramp_rt64_symbol_info) ||
+            sigtramp_rt64_symbol_info.address == nullptr) {
+          // Unexpected: VDSO is present, yet the expected symbol is missing
+          // or null.
+          assert(false && "VDSO is present, but doesn't have expected symbol");
+          kernel_symbol_status = kAddressInvalid;
+        } else {
+          kernel_sigtramp_rt64_address =
+              reinterpret_cast<const unsigned char *>(
+                  sigtramp_rt64_symbol_info.address);
+          kernel_symbol_status = kAddressValid;
+        }
+      } else {
+        kernel_symbol_status = kAddressInvalid;
+      }
+    }
+
+    if (new_sp != nullptr &&
+        kernel_symbol_status == kAddressValid &&
+        StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
+      const ucontext_t* signal_context =
+          reinterpret_cast<const ucontext_t*>(uc);
+      void **const sp_before_signal =
+          reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
+      // Check that the alleged sp before signal is nonnull and is reasonably
+      // aligned.
+      if (sp_before_signal != nullptr &&
+          ((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
+        // Check that the alleged stack pointer is actually readable. This is
+        // to prevent a "double fault" in case we hit the first fault due to
+        // e.g. stack corruption.
+        if (absl::debug_internal::AddressIsReadable(sp_before_signal)) {
+          // Alleged stack pointer is readable, use it for further unwinding.
+          new_sp = sp_before_signal;
+        }
+      }
+    }
+  }
+#endif
+
+  return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+void StacktracePowerPCDummyFunction() __attribute__((noinline));
+void StacktracePowerPCDummyFunction() { __asm__ volatile(""); }
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  void **sp;
+  // Apple OS X uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
+  // and Darwin 8.8.1 (Tiger) use as 1.38.  This means we have to use a
+  // different asm syntax.  I don't know quite the best way to discriminate
+  // systems using the old as from the new one; I've gone with __APPLE__.
+#ifdef __APPLE__
+  __asm__ volatile ("mr %0,r1" : "=r" (sp));
+#else
+  __asm__ volatile ("mr %0,1" : "=r" (sp));
+#endif
+
+  // On PowerPC, the "Link Register" or "Link Record" (LR), is a stack
+  // entry that holds the return address of the subroutine call (what
+  // instruction we run after our function finishes).  This is the
+  // same as the stack-pointer of our parent routine, which is what we
+  // want here.  While the compiler will always(?) set up LR for
+  // subroutine calls, it may not for leaf functions (such as this one).
+  // This routine forces the compiler (at least gcc) to push it anyway.
+  StacktracePowerPCDummyFunction();
+
+  // The LR save area is used by the callee, so the top entry is bogus.
+  skip_count++;
+
+  int n = 0;
+
+  // Unlike the ABIs of X86 and ARM, PowerPC ABIs say that the return
+  // address (in the link register) of a function call is stored in the
+  // caller's stack frame instead of the callee's.  When we look for the
+  // return address associated with a stack frame, we need to make sure that
+  // there is a caller frame before it.  So we call NextStackFrame before
+  // entering the loop below and check next_sp instead of sp for loop
+  // termination.  The outermost frame is set up by runtimes and it does not
+  // have a caller frame, so it is skipped.
+
+  // The absl::GetStackFrames routine is called when we are in some
+  // informational context (the failure signal handler for example).
+  // Use the non-strict unwinding rules to produce a stack trace
+  // that is as complete as possible (even if it contains a few
+  // bogus entries in some rare cases).
+  void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+
+  while (next_sp && n < max_depth) {
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = StacktracePowerPCGetLR(sp);
+      if (IS_STACK_FRAMES) {
+        if (next_sp > sp) {
+          sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+
+    sp = next_sp;
+    next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
+  }
+
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the max of frames we are willing to
+    // count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 1000;
+    int j = 0;
+    for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+      next_sp = NextStackFrame<false, IS_WITH_CONTEXT>(next_sp, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
diff --git a/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
new file mode 100644
index 000000000..a66be7797
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_unimplemented-inl.inc
@@ -0,0 +1,14 @@
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** /* result */, int* /* sizes */,
+                      int /* max_depth */, int /* skip_count */,
+                      const void* /* ucp */, int *min_dropped_frames) {
+  if (min_dropped_frames != nullptr) {
+    *min_dropped_frames = 0;
+  }
+  return 0;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
diff --git a/absl/debugging/internal/stacktrace_win32-inl.inc b/absl/debugging/internal/stacktrace_win32-inl.inc
new file mode 100644
index 000000000..4c7f855bb
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_win32-inl.inc
@@ -0,0 +1,75 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produces a stack trace for Windows.  Normally, one could use
+// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that
+// should work for binaries compiled using MSVC in "debug" mode.
+// However, in "release" mode, Windows uses frame-pointer
+// optimization, which makes getting a stack trace very difficult.
+//
+// There are several approaches one can take.  One is to use Windows
+// intrinsics like StackWalk64.  These can work, but have restrictions
+// on how successful they can be.  Another attempt is to write a
+// version of stacktrace_x86-inl.h that has heuristic support for
+// dealing with FPO, similar to what WinDbg does (see
+// http://www.nynaeve.net/?p=97).  There are (non-working) examples of
+// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1
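+//
+// For reference, the call this file relies on has this shape (illustrative
+// sketch only; the real code below loads the function pointer from
+// ntdll.dll at static-init time and adjusts the skip count for its own
+// wrapper frames):
+//
+//   void *frames[64];
+//   USHORT captured = RtlCaptureStackBackTrace(
+//       /*FramesToSkip=*/0, /*FramesToCapture=*/64, frames,
+//       /*BackTraceHash=*/nullptr);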
+//
+// The solution we've ended up doing is to call the undocumented
+// windows function RtlCaptureStackBackTrace, which probably doesn't
+// work with FPO but at least is fast, and doesn't require a symbol
+// server.
+//
+// This code is inspired by a patch from David Vitek:
+// http://code.google.com/p/google-perftools/issues/detail?id=83
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
+
+#include <windows.h>  // for GetProcAddress and GetModuleHandle
+#include <cstring>
+
+typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
+    IN ULONG frames_to_skip,
+    IN ULONG frames_to_capture,
+    OUT PVOID *backtrace,
+    OUT PULONG backtrace_hash);
+
+// Load the function we need at static init time, where we don't have
+// to worry about someone else holding the loader's lock.
+static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
+    (RtlCaptureStackBackTrace_Function*)
+    GetProcAddress(GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  int n = 0;
+  if (!RtlCaptureStackBackTrace_fn) {
+    // can't find a stacktrace with no function to call
+  } else {
+    n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
+  }
+  if (IS_STACK_FRAMES) {
+    // No implementation for finding out the stack frame sizes yet.
+    memset(sizes, 0, sizeof(*sizes) * n);
+  }
+  if (min_dropped_frames != nullptr) {
+    // Not implemented.
+    *min_dropped_frames = 0;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
new file mode 100644
index 000000000..6e1af0178
--- /dev/null
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -0,0 +1,327 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Produce stack trace
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+
+#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
+#include <ucontext.h>  // for ucontext_t
+#endif
+
+#if !defined(_WIN32)
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+#include "absl/base/internal/raw_logging.h"
+
+#if defined(__linux__) && defined(__i386__)
+// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
+// preceding "syscall" or "sysenter".
+// If __kernel_vsyscall uses frame pointer, answer 0.
+//
+// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
+// to analyze before giving up.
+// Up to kMaxBytes+1 bytes of instructions could be accessed.
+//
+// Here are known __kernel_vsyscall instruction sequences:
+//
+// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
+// Used on Intel.
+//  0xffffe400 <__kernel_vsyscall+0>:       push   %ecx
+//  0xffffe401 <__kernel_vsyscall+1>:       push   %edx
+//  0xffffe402 <__kernel_vsyscall+2>:       push   %ebp
+//  0xffffe403 <__kernel_vsyscall+3>:       mov    %esp,%ebp
+//  0xffffe405 <__kernel_vsyscall+5>:       sysenter
+//
+// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
+// Used on AMD.
+//  0xffffe400 <__kernel_vsyscall+0>:       push   %ebp
+//  0xffffe401 <__kernel_vsyscall+1>:       mov    %ecx,%ebp
+//  0xffffe403 <__kernel_vsyscall+3>:       syscall
+//
+// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
+//  0xffffe400 <__kernel_vsyscall+0>:       int $0x80
+//  0xffffe401 <__kernel_vsyscall+1>:       ret
+//
+static const int kMaxBytes = 10;
+
+// We use assert()s instead of DCHECK()s -- this is too low level
+// for DCHECK().
+
+static int CountPushInstructions(const unsigned char *const addr) {
+  int result = 0;
+  for (int i = 0; i < kMaxBytes; ++i) {
+    if (addr[i] == 0x89) {
+      // "mov reg,reg"
+      if (addr[i + 1] == 0xE5) {
+        // Found "mov %esp,%ebp".
+        return 0;
+      }
+      ++i;  // Skip register encoding byte.
+    } else if (addr[i] == 0x0F &&
+               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
+      // Found "sysenter" or "syscall".
+      return result;
+    } else if ((addr[i] & 0xF0) == 0x50) {
+      // Found "push %reg".
+      ++result;
+    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
+      // Found "int $0x80"
+      assert(result == 0);
+      return 0;
+    } else {
+      // Unexpected instruction.
+      assert(false && "unexpected instruction in __kernel_vsyscall");
+      return 0;
+    }
+  }
+  // Unexpected: didn't find SYSENTER or SYSCALL in
+  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
+  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
+  return 0;
+}
+#endif
+
+// Assume stack frames larger than 100,000 bytes are bogus.
+static const int kMaxFrameBytes = 100000;
+
+// Returns the stack frame pointer from signal context, 0 if unknown.
+// vuc is a ucontext_t *.  We use void* to avoid the use
+// of ucontext_t on non-POSIX systems.
+static uintptr_t GetFP(const void *vuc) {
+#if defined(__linux__)
+  if (vuc != nullptr) {
+    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
+#if defined(__i386__)
+    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
+    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
+#elif defined(__x86_64__)
+    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
+    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
+#else
+    const uintptr_t bp = 0;
+    const uintptr_t sp = 0;
+#endif
+    // Sanity-check that the base pointer is valid.  It should be as long as
+    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code
+    // in the process is compiled with --copt=-fomit-frame-pointer or
+    // --copt=-momit-leaf-frame-pointer.
+    //
+    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
+    // behavior when building with clang.  Talk to the C++ toolchain team
+    // about fixing that.
+    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
+
+    // If bp isn't a plausible frame pointer, return the stack pointer
+    // instead.  If we're lucky, it points to the start of a stack frame;
+    // otherwise, we'll get one frame of garbage in the stack trace and fail
+    // the sanity check on the next iteration.
+    return sp;
+  }
+#endif
+  return 0;
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void **NextStackFrame(void **old_fp, const void *uc) {
+  void **new_fp = (void **)*old_fp;
+
+#if defined(__linux__) && defined(__i386__)
+  if (WITH_CONTEXT && uc != nullptr) {
+    // How many "push %reg" instructions are there at __kernel_vsyscall?
+    // This is constant for a given kernel and processor, so compute
+    // it only once.
+    static int num_push_instructions = -1;  // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_rt_sigreturn can not possibly
+    // be there.
+    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
+    static const unsigned char *kernel_vsyscall_address = nullptr;
+    if (num_push_instructions == -1) {
+      absl::debug_internal::VDSOSupport vdso;
+      if (vdso.IsPresent()) {
+        absl::debug_internal::VDSOSupport::SymbolInfo
+            rt_sigreturn_symbol_info;
+        absl::debug_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
+        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
+                               &rt_sigreturn_symbol_info) ||
+            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
+                               &vsyscall_symbol_info) ||
+            rt_sigreturn_symbol_info.address == nullptr ||
+            vsyscall_symbol_info.address == nullptr) {
+          // Unexpected: 32-bit VDSO is present, yet one of the expected
+          // symbols is missing or null.
+          assert(false && "VDSO is present, but doesn't have expected symbols");
+          num_push_instructions = 0;
+        } else {
+          kernel_rt_sigreturn_address =
+              reinterpret_cast<const unsigned char *>(
+                  rt_sigreturn_symbol_info.address);
+          kernel_vsyscall_address =
+              reinterpret_cast<const unsigned char *>(
+                  vsyscall_symbol_info.address);
+          num_push_instructions =
+              CountPushInstructions(kernel_vsyscall_address);
+        }
+      } else {
+        num_push_instructions = 0;
+      }
+    }
+    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
+        old_fp[1] == kernel_rt_sigreturn_address) {
+      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
+      // This kernel does not use frame pointer in its VDSO code,
+      // and so %ebp is not suitable for unwinding.
+      void **const reg_ebp =
+          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
+      const unsigned char *const reg_eip =
+          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
+      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
+          reg_eip - kernel_vsyscall_address < kMaxBytes) {
+        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
+        // Restore from 'ucv' instead.
+        void **const reg_esp =
+            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
+        // Check that the alleged %esp is not null and is reasonably aligned.
+        if (reg_esp &&
+            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
+          // Check that the alleged %esp is actually readable. This is to
+          // prevent a "double fault" in case we hit the first fault due to
+          // e.g. stack corruption.
+          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
+          if (absl::debug_internal::AddressIsReadable(reg_esp2)) {
+            // Alleged %esp is readable, use it for further unwinding.
+            new_fp = reinterpret_cast<void **>(reg_esp2);
+          }
+        }
+      }
+    }
+  }
+#endif
+
+  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
+  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);
+
+  // Check that the transition from frame pointer old_fp to frame
+  // pointer new_fp isn't clearly bogus.  Skip the checks if new_fp
+  // matches the signal context, so that we don't skip out early when
+  // using an alternate signal stack.
+  //
+  // TODO(bcmills): The GetFP call should be completely unnecessary when
+  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the
+  // thread's stack by this point), but it is empirically still needed (e.g.
+  // when the stack includes a call to abort).  unw_get_reg returns
+  // UNW_EBADREG for some frames.  Figure out why GetValidFrameAddr and/or
+  // libunwind isn't doing what it's supposed to.
+  if (STRICT_UNWINDING &&
+      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
+    // With the stack growing downwards, older stack frame must be
+    // at a greater address than the current one.
+    if (new_fp_u <= old_fp_u) return nullptr;
+    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
+  } else {
+    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_fp == old_fp) return nullptr;
+  }
+
+  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
+#ifdef __i386__
+  // On 32-bit machines, the stack pointer can be very close to
+  // 0xffffffff, so we explicitly check for a pointer into the
+  // last two pages in the address space.
+  if (new_fp_u >= 0xffffe000) return nullptr;
+#endif
+#if !defined(_WIN32)
+  if (!STRICT_UNWINDING) {
+    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+    // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_fp is readable.
+    // Note: NextStackFrame() is only called while the program
+    // is already on its last leg, so it's ok to be slow here.
+
+    if (!absl::debug_internal::AddressIsReadable(new_fp)) {
+      return nullptr;
+    }
+  }
+#endif
+  return new_fp;
+}
+
+template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+ABSL_ATTRIBUTE_NOINLINE
+static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
+                      const void *ucp, int *min_dropped_frames) {
+  int n = 0;
+  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
+
+  while (fp && n < max_depth) {
+    if (*(fp + 1) == reinterpret_cast<void *>(0)) {
+      // In 64-bit code, we often see a frame that
+      // points to itself and has a return address of 0.
+      break;
+    }
+    void **next_fp =
+        NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n] = *(fp + 1);
+      if (IS_STACK_FRAMES) {
+        if (next_fp > fp) {
+          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
+        } else {
+          // A frame-size of 0 is used to indicate unknown frame size.
+          sizes[n] = 0;
+        }
+      }
+      n++;
+    }
+    fp = next_fp;
+  }
+  if (min_dropped_frames != nullptr) {
+    // Implementation detail: we clamp the max of frames we are willing to
+    // count, so as not to spend too much time in the loop below.
+    const int kMaxUnwind = 1000;
+    int j = 0;
+    for (; fp != nullptr && j < kMaxUnwind; j++) {
+      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
+    }
+    *min_dropped_frames = j;
+  }
+  return n;
+}
+
+#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
new file mode 100644
index 000000000..5026e1c1a
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.cc
@@ -0,0 +1,177 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+
+#include "absl/debugging/internal/vdso_support.h"
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT  // defined in vdso_support.h
+
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/port.h"
+
+#ifndef AT_SYSINFO_EHDR
+#define AT_SYSINFO_EHDR 33  // for crosstoolv10
+#endif
+
+namespace absl {
+namespace debug_internal {
+
+std::atomic<const void *> VDSOSupport::vdso_base_(
+    debug_internal::ElfMemImage::kInvalidBase);
+std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+VDSOSupport::VDSOSupport()
+    // If vdso_base_ is still set to kInvalidBase, we got here
+    // before VDSOSupport::Init has been called. Call it now.
+    : image_(vdso_base_.load(std::memory_order_relaxed) ==
+                     debug_internal::ElfMemImage::kInvalidBase
+                 ? Init()
+                 : vdso_base_.load(std::memory_order_relaxed)) {}
+
+// NOTE: we can't use GoogleOnceInit() below, because we can be
+// called by tcmalloc, and none of the *once* stuff may be functional yet.
+//
+// In addition, we hope that the VDSOSupportHelper constructor
+// causes this code to run before there are any threads, and before
+// InitGoogle() has executed any chroot or setuid calls.
+//
+// Finally, even if there is a race here, it is harmless, because
+// the operation should be idempotent.
+const void *VDSOSupport::Init() {
+  if (vdso_base_.load(std::memory_order_relaxed) ==
+      debug_internal::ElfMemImage::kInvalidBase) {
+    {
+      // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
+      // on stack, and so glibc works as if VDSO was not present.
+      // But going directly to kernel via /proc/self/auxv below bypasses
+      // Valgrind zapping. So we check for Valgrind separately.
+      if (RunningOnValgrind()) {
+        vdso_base_.store(nullptr, std::memory_order_relaxed);
+        getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+        return nullptr;
+      }
+      int fd = open("/proc/self/auxv", O_RDONLY);
+      if (fd == -1) {
+        // Kernel too old to have a VDSO.
+        vdso_base_.store(nullptr, std::memory_order_relaxed);
+        getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+        return nullptr;
+      }
+      ElfW(auxv_t) aux;
+      while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+        if (aux.a_type == AT_SYSINFO_EHDR) {
+          vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+                           std::memory_order_relaxed);
+          break;
+        }
+      }
+      close(fd);
+    }
+    if (vdso_base_.load(std::memory_order_relaxed) ==
+        debug_internal::ElfMemImage::kInvalidBase) {
+      // Didn't find AT_SYSINFO_EHDR in auxv[].
+      vdso_base_.store(nullptr, std::memory_order_relaxed);
+    }
+  }
+  GetCpuFn fn = &GetCPUViaSyscall;  // default if VDSO not present.
+  if (vdso_base_.load(std::memory_order_relaxed)) {
+    VDSOSupport vdso;
+    SymbolInfo info;
+    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+      fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
+    }
+  }
+  // Subtle: this code runs outside of any locks; prevent compiler
+  // from assigning to getcpu_fn_ more than once.
+  getcpu_fn_.store(fn, std::memory_order_relaxed);
+  return vdso_base_.load(std::memory_order_relaxed);
+}
+
+const void *VDSOSupport::SetBase(const void *base) {
+  ABSL_RAW_CHECK(base != debug_internal::ElfMemImage::kInvalidBase,
+                 "internal error");
+  const void *old_base = vdso_base_.load(std::memory_order_relaxed);
+  vdso_base_.store(base, std::memory_order_relaxed);
+  image_.Init(base);
+  // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
+  getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
+  return old_base;
+}
+
+bool VDSOSupport::LookupSymbol(const char *name,
+                               const char *version,
+                               int type,
+                               SymbolInfo *info) const {
+  return image_.LookupSymbol(name, version, type, info);
+}
+
+bool VDSOSupport::LookupSymbolByAddress(const void *address,
+                                        SymbolInfo *info_out) const {
+  return image_.LookupSymbolByAddress(address, info_out);
+}
+
+// NOLINT on 'long' because this routine mimics kernel api.
+long VDSOSupport::GetCPUViaSyscall(unsigned *cpu,  // NOLINT(runtime/int)
+                                   void *, void *) {
+#ifdef SYS_getcpu
+  return syscall(SYS_getcpu, cpu, nullptr, nullptr);
+#else
+  // x86_64 never implemented sys_getcpu(), except as a VDSO call.
+  errno = ENOSYS;
+  return -1;
+#endif
+}
+
+// Use fast __vdso_getcpu if available.
+long VDSOSupport::InitAndGetCPU(unsigned *cpu,  // NOLINT(runtime/int)
+                                void *x, void *y) {
+  Init();
+  GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
+  ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
+  return (*fn)(cpu, x, y);
+}
+
+// This function must be very fast, and may be called from very
+// low level (e.g. tcmalloc). Hence I avoid things like
+// GoogleOnceInit() and ::operator new.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+int GetCPU() {
+  unsigned cpu;
+  int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
+  return ret_code == 0 ? cpu : ret_code;
+}
+
+// We need to make sure VDSOSupport::Init() is called before
+// InitGoogle() does any setuid or chroot calls. If VDSOSupport
+// is used in any global constructor, this will happen, since
+// VDSOSupport's constructor calls Init. But if not, we need to
+// ensure it here, with a global constructor of our own. This
+// is an allowed exception to the normal rule against non-trivial
+// global constructors.
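As a usage illustration, a minimal sketch (not part of this patch's files) of a caller querying the current CPU through this machinery, assuming it may include the internal header:

    #include <cstdio>

    #include "absl/debugging/internal/vdso_support.h"

    int main() {
    #ifdef ABSL_HAVE_VDSO_SUPPORT
      // Resolves to __vdso_getcpu when the kernel exports it; otherwise
      // falls back to syscall(SYS_getcpu, ...). Negative values mean ENOSYS.
      std::printf("running on cpu %d\n", absl::debug_internal::GetCPU());
    #endif
      return 0;
    }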
+static class VDSOInitHelper {
+ public:
+  VDSOInitHelper() { VDSOSupport::Init(); }
+} vdso_init_helper;
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_VDSO_SUPPORT
diff --git a/absl/debugging/internal/vdso_support.h b/absl/debugging/internal/vdso_support.h
new file mode 100644
index 000000000..a6a7a1779
--- /dev/null
+++ b/absl/debugging/internal/vdso_support.h
@@ -0,0 +1,155 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Allow dynamic symbol lookup in the kernel VDSO page.
+//
+// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
+// executable code, which looks like a shared library, but doesn't
+// necessarily exist anywhere on disk, and which gets mmap()ed into
+// every process by kernels which support VDSO, such as 2.6.x for 32-bit
+// executables, and 2.6.24 and above for 64-bit executables.
+//
+// More details can be found here:
+// http://www.trilithium.com/johan/2005/08/linux-gate/
+//
+// VDSOSupport -- a class representing kernel VDSO (if present).
+//
+// Example usage:
+//   VDSOSupport vdso;
+//   VDSOSupport::SymbolInfo info;
+//   typedef long (*FN)(unsigned *, void *, void *);
+//   FN fn = nullptr;
+//   if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
+//     fn = reinterpret_cast<FN>(info.address);
+//   }
+
+#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
+
+#include <atomic>
+
+#include "absl/debugging/internal/elf_mem_image.h"
+
+#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set
+#else
+#define ABSL_HAVE_VDSO_SUPPORT 1
+#endif
+
+namespace absl {
+namespace debug_internal {
+
+// NOTE: this class may be used from within tcmalloc, and can not
+// use any memory allocation routines.
+class VDSOSupport {
+ public:
+  VDSOSupport();
+
+  typedef ElfMemImage::SymbolInfo SymbolInfo;
+  typedef ElfMemImage::SymbolIterator SymbolIterator;
+
+  // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
+  // depending on how the kernel is built. The kernel is normally built with
+  // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
+  // compile-time constant.
+#ifdef __powerpc64__
+  enum { kVDSOSymbolType = STT_NOTYPE };
+#else
+  enum { kVDSOSymbolType = STT_FUNC };
+#endif
+
+  // Answers whether we have a vdso at all.
+  bool IsPresent() const { return image_.IsPresent(); }
+
+  // Allow to iterate over all VDSO symbols.
+  SymbolIterator begin() const { return image_.begin(); }
+  SymbolIterator end() const { return image_.end(); }
+
+  // Look up versioned dynamic symbol in the kernel VDSO.
+  // Returns false if VDSO is not present, or doesn't contain given
+  // symbol/version/type combination.
+  // If info_out != nullptr, additional details are filled in.
+  bool LookupSymbol(const char *name, const char *version,
+                    int symbol_type, SymbolInfo *info_out) const;
+
+  // Find info about symbol (if any) which overlaps given address.
+  // Returns true if symbol was found; false if VDSO isn't present
+  // or doesn't have a symbol overlapping given address.
+  // If info_out != nullptr, additional details are filled in.
+  bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+  // Used only for testing. Replace real VDSO base with a mock.
+  // Returns previous value of vdso_base_. After you are done testing,
+  // you are expected to call SetBase() with previous value, in order to
+  // reset state to the way it was.
+  const void *SetBase(const void *s);
+
+  // Computes vdso_base_ and returns it. Should be called as early as
+  // possible; before any thread creation, chroot or setuid.
+  static const void *Init();
+
+ private:
+  // image_ represents VDSO ELF image in memory.
+  // image_.ehdr_ == nullptr implies there is no VDSO.
+  ElfMemImage image_;
+
+  // Cached value of auxv AT_SYSINFO_EHDR, computed once.
+  // This is a tri-state:
+  //   kInvalidBase   => value hasn't been determined yet.
+  //   0              => there is no VDSO.
+  //   else           => vma of VDSO Elf{32,64}_Ehdr.
+  //
+  // When testing with mock VDSO, low bit is set.
+  // The low bit is always available because vdso_base_ is
+  // page-aligned.
+  static std::atomic<const void *> vdso_base_;
+
+  // NOLINT on 'long' because these routines mimic kernel api.
+  // The 'cache' parameter may be used by some versions of the kernel,
+  // and should be nullptr or point to a static buffer containing at
+  // least two 'long's.
+  static long InitAndGetCPU(unsigned *cpu, void *cache,     // NOLINT 'long'.
+                            void *unused);
+  static long GetCPUViaSyscall(unsigned *cpu, void *cache,  // NOLINT 'long'.
+                               void *unused);
+  typedef long (*GetCpuFn)(unsigned *cpu, void *cache,      // NOLINT 'long'.
+                           void *unused);
+
+  // This function pointer may point to InitAndGetCPU,
+  // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
+  static std::atomic<GetCpuFn> getcpu_fn_;
+
+  friend int GetCPU(void);  // Needs access to getcpu_fn_.
+
+  VDSOSupport(const VDSOSupport&) = delete;
+  VDSOSupport& operator=(const VDSOSupport&) = delete;
+};
+
+// Same as sched_getcpu() on later glibc versions.
+// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
+// otherwise use syscall(SYS_getcpu,...).
+// May return -1 with errno == ENOSYS if the kernel doesn't
+// support SYS_getcpu.
+int GetCPU();
+
+}  // namespace debug_internal
+}  // namespace absl
+
+#endif  // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif  // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
new file mode 100644
index 000000000..30e7e8a85
--- /dev/null
+++ b/absl/debugging/leak_check.cc
@@ -0,0 +1,35 @@
+// Wrappers around lsan_interface functions.
+// When lsan is not linked in, these functions are not available,
+// therefore Abseil code which depends on these functions is conditioned on the
+// definition of LEAK_SANITIZER.
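A minimal sketch of the caller-side pattern this enables, assuming a hypothetical singleton that should outlive leak analysis (IgnoreLeak is declared in leak_check.h below):

    #include <string>

    #include "absl/debugging/leak_check.h"

    std::string* LeakySingleton() {
      // Safe whether or not LSan is linked in: without LEAK_SANITIZER these
      // calls compile to no-ops.
      static std::string* s = absl::IgnoreLeak(new std::string("singleton"));
      return s;
    }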
+#include "absl/debugging/leak_check.h" + +#ifndef LEAK_SANITIZER + +namespace absl { +bool HaveLeakSanitizer() { return false; } +void DoIgnoreLeak(const void*) { } +void RegisterLivePointers(const void*, size_t) { } +void UnRegisterLivePointers(const void*, size_t) { } +LeakCheckDisabler::LeakCheckDisabler() { } +LeakCheckDisabler::~LeakCheckDisabler() { } +} // namespace absl + +#else + +#include + +namespace absl { +bool HaveLeakSanitizer() { return true; } +void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); } +void RegisterLivePointers(const void* ptr, size_t size) { + __lsan_register_root_region(ptr, size); +} +void UnRegisterLivePointers(const void* ptr, size_t size) { + __lsan_unregister_root_region(ptr, size); +} +LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); } +LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); } +} // namespace absl + +#endif // LEAK_SANITIZER diff --git a/absl/debugging/leak_check.h b/absl/debugging/leak_check.h new file mode 100644 index 000000000..f67fe88a0 --- /dev/null +++ b/absl/debugging/leak_check.h @@ -0,0 +1,111 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ----------------------------------------------------------------------------- +// File: leak_check.h +// ----------------------------------------------------------------------------- +// +// This package contains functions that affect leak checking behavior within +// targets built with the LeakSanitizer (LSan), a memory leak detector that is +// integrated within the AddressSanitizer (ASan) as an additional component, or +// which can be used standalone. LSan and ASan are included or can be provided +// as additional components for most compilers such as Clang, gcc and MSVC. +// Note: this leak checking API is not yet supported in MSVC. +// Leak checking is enabled by default in all ASan builds. +// +// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// ----------------------------------------------------------------------------- +#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_ +#define ABSL_DEBUGGING_LEAK_CHECK_H_ + +#include + +namespace absl { + +// HaveLeakSanitizer() +// +// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is +// currently built into this target. +bool HaveLeakSanitizer(); + +// DoIgnoreLeak() +// +// Implements `IgnoreLeak()` below. This function should usually +// not be called directly; calling `IgnoreLeak()` is preferred. +void DoIgnoreLeak(const void* ptr); + +// IgnoreLeak() +// +// Instruct the leak sanitizer to ignore leak warnings on the object referenced +// by the passed pointer, as well as all heap objects transitively referenced +// by it. The passed object pointer can point to either the beginning of the +// object or anywhere within it. 
+// +// Example: +// +// static T* obj = IgnoreLeak(new T(...)); +// +// If the passed `ptr` does not point to an actively allocated object at the +// time `IgnoreLeak()` is called, the call is a no-op; if it is actively +// allocated, the object must not get deallocated later. +// +template +T* IgnoreLeak(T* ptr) { + DoIgnoreLeak(ptr); + return ptr; +} + +// LeakCheckDisabler +// +// This helper class indicates that any heap allocations done in the code block +// covered by the scoped object, which should be allocated on the stack, will +// not be reported as leaks. Leak check disabling will occur within the code +// block and any nested function calls within the code block. +// +// Example: +// +// void Foo() { +// LeakCheckDisabler disabler; +// ... code that allocates objects whose leaks should be ignored ... +// } +// +// REQUIRES: Destructor runs in same thread as constructor +class LeakCheckDisabler { + public: + LeakCheckDisabler(); + LeakCheckDisabler(const LeakCheckDisabler&) = delete; + LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete; + ~LeakCheckDisabler(); +}; + +// RegisterLivePointers() +// +// Registers `ptr[0,size-1]` as pointers to memory that is still actively being +// referenced and for which leak checking should be ignored. This function is +// useful if you store pointers in mapped memory, for memory ranges that we know +// are correct but for which normal analysis would flag as leaked code. +void RegisterLivePointers(const void* ptr, size_t size); + +// UnRegisterLivePointers() +// +// Deregisters the pointers previously marked as active in +// `RegisterLivePointers()`, enabling leak checking of those pointers. +void UnRegisterLivePointers(const void* ptr, size_t size); + +} // namespace absl + +#endif // ABSL_DEBUGGING_LEAK_CHECK_H_ diff --git a/absl/debugging/leak_check_disable.cc b/absl/debugging/leak_check_disable.cc new file mode 100644 index 000000000..df22c1cae --- /dev/null +++ b/absl/debugging/leak_check_disable.cc @@ -0,0 +1,20 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Disable LeakSanitizer when this file is linked in. +// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h +extern "C" int __lsan_is_turned_off(); +extern "C" int __lsan_is_turned_off() { + return 1; +} diff --git a/absl/debugging/leak_check_fail_test.cc b/absl/debugging/leak_check_fail_test.cc new file mode 100644 index 000000000..bf541fe84 --- /dev/null +++ b/absl/debugging/leak_check_fail_test.cc @@ -0,0 +1,41 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/debugging/leak_check.h" + +namespace { + +TEST(LeakCheckTest, LeakMemory) { + // This test is expected to cause lsan failures on program exit. Therefore the + // test will be run only by leak_check_test.sh, which will verify a + // failed exit code. + + char* foo = strdup("lsan should complain about this leaked string"); + ABSL_RAW_LOG(INFO, "Should detect leaked std::string %s", foo); +} + +TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) { + // This test is expected to cause lsan failures on program exit. Therefore the + // test will be run only by external_leak_check_test.sh, which will verify a + // failed exit code. + { absl::LeakCheckDisabler disabler; } + char* foo = strdup("lsan should also complain about this leaked string"); + ABSL_RAW_LOG(INFO, "Re-enabled leak detection.Should detect leaked std::string %s", + foo); +} + +} // namespace diff --git a/absl/debugging/leak_check_test.cc b/absl/debugging/leak_check_test.cc new file mode 100644 index 000000000..27ca2d1b7 --- /dev/null +++ b/absl/debugging/leak_check_test.cc @@ -0,0 +1,41 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/debugging/leak_check.h" + +namespace { + +TEST(LeakCheckTest, DetectLeakSanitizer) { +#ifdef ABSL_EXPECT_LEAK_SANITIZER + EXPECT_TRUE(absl::HaveLeakSanitizer()); +#else + EXPECT_FALSE(absl::HaveLeakSanitizer()); +#endif +} + +TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) { + auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string")); + ABSL_RAW_LOG(INFO, "Ignoring leaked std::string %s", foo->c_str()); +} + +TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) { + absl::LeakCheckDisabler disabler; + auto foo = new std::string("some std::string leaked while checks are disabled"); + ABSL_RAW_LOG(INFO, "Ignoring leaked std::string %s", foo->c_str()); +} + +} // namespace diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc new file mode 100644 index 000000000..6c1690426 --- /dev/null +++ b/absl/debugging/stacktrace.cc @@ -0,0 +1,133 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Produce stack trace. +// +// There are three different ways we can try to get the stack trace: +// +// 1) Our hand-coded stack-unwinder. This depends on a certain stack +// layout, which is used by gcc (and those systems using a +// gcc-compatible ABI) on x86 systems, at least since gcc 2.95. +// It uses the frame pointer to do its work. +// +// 2) The libunwind library. This is still in development, and as a +// separate library adds a new dependency, but doesn't need a frame +// pointer. It also doesn't call malloc. +// +// 3) The gdb unwinder -- also the one used by the c++ exception code. +// It's obviously well-tested, but has a fatal flaw: it can call +// malloc() from the unwinder. This is a problem because we're +// trying to use the unwinder to instrument malloc(). +// +// Note: if you add a new implementation here, make sure it works +// correctly when absl::GetStackTrace() is called with max_depth == 0. +// Some code may do that. + +#include "absl/debugging/stacktrace.h" + +#include + +#include "absl/base/port.h" +#include "absl/debugging/internal/stacktrace_config.h" + +#if defined(ABSL_STACKTRACE_INL_HEADER) +#include ABSL_STACKTRACE_INL_HEADER +#else +# error Cannot calculate stack trace: will need to write for your environment +# include "absl/debugging/internal/stacktrace_x86-inl.inc" +# include "absl/debugging/internal/stacktrace_win32-inl.inc" +# include "absl/debugging/internal/stacktrace_unimplemented-inl.inc" +# include "absl/debugging/internal/stacktrace_libunwind-inl.inc" +# include "absl/debugging/internal/stacktrace_generic-inl.inc" +# include "absl/debugging/internal/stacktrace_powerpc-inl.inc" +# include "absl/debugging/internal/stacktrace_arm-inl.inc" +# include "absl/debugging/internal/stacktrace_aarch64-inl.inc" +#endif + +namespace absl { +namespace { + +typedef int (*Unwinder)(void**, int*, int, int, const void*, int*); +std::atomic custom; + +template +ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes, + int max_depth, int skip_count, + const void* uc, + int* min_dropped_frames) { + Unwinder f = &UnwindImpl; + Unwinder g = custom.load(std::memory_order_acquire); + if (g != nullptr) f = g; + + // Add 1 to skip count for the unwinder function itself + int size = (*f)(result, sizes, max_depth, skip_count + 1, uc, + min_dropped_frames); + // To disable tail call to (*f)(...) 
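(Without the barrier on the next line, an optimizing compiler could emit the call above as a tail jump, collapsing Unwind's own frame; the skip_count + 1 adjustment would then skip a caller's frame instead of Unwind's.)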
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+  return size;
+}
+
+}  // anonymous namespace
+
+int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count) {
+  return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
+                             nullptr);
+}
+
+int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+                              int skip_count, const void* uc,
+                              int* min_dropped_frames) {
+  return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
+                            min_dropped_frames);
+}
+
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+  return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
+                              nullptr);
+}
+
+int GetStackTraceWithContext(void** result, int max_depth, int skip_count,
+                             const void* uc, int* min_dropped_frames) {
+  return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
+                             min_dropped_frames);
+}
+
+void SetStackUnwinder(Unwinder w) {
+  custom.store(w, std::memory_order_release);
+}
+
+int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
+                         const void* uc, int* min_dropped_frames) {
+  skip++;  // For this function
+  Unwinder f = nullptr;
+  if (sizes == nullptr) {
+    if (uc == nullptr) {
+      f = &UnwindImpl<false, false>;
+    } else {
+      f = &UnwindImpl<false, true>;
+    }
+  } else {
+    if (uc == nullptr) {
+      f = &UnwindImpl<true, false>;
+    } else {
+      f = &UnwindImpl<true, true>;
+    }
+  }
+  volatile int x = 0;
+  int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
+  x = 1; (void) x;  // To disable tail call to (*f)(...)
+  return n;
+}
+
+}  // namespace absl
diff --git a/absl/debugging/stacktrace.h b/absl/debugging/stacktrace.h
new file mode 100644
index 000000000..0aab5749b
--- /dev/null
+++ b/absl/debugging/stacktrace.h
@@ -0,0 +1,160 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Routines to extract the current stack trace. These functions are
+// thread-safe and async-signal-safe.
+// Note that stack trace functionality is platform dependent and requires
+// additional support from the compiler/build system in many cases. (That is,
+// this generally only works on platforms/builds that have been specifically
+// configured to support it.)
+
+#ifndef ABSL_DEBUGGING_STACKTRACE_H_
+#define ABSL_DEBUGGING_STACKTRACE_H_
+
+namespace absl {
+
+// Skips the most recent "skip_count" stack frames (also skips the
+// frame generated for the "absl::GetStackFrames" routine itself), and then
+// records the pc values for up to the next "max_depth" frames in
+// "result", and the corresponding stack frame sizes in "sizes".
+// Returns the number of values recorded in "result"/"sizes".
+//
+// Example:
+//      main() { foo(); }
+//      foo() { bar(); }
+//      bar() {
+//        void* result[10];
+//        int sizes[10];
+//        int depth = absl::GetStackFrames(result, sizes, 10, 1);
+//      }
+//
+// The absl::GetStackFrames call will skip the frame for "bar". It will
+// return 2 and will produce pc values that map to the following
+// procedures:
+//      result[0]       foo
+//      result[1]       main
+// (Actually, there may be a few more entries after "main" to account for
+// startup procedures.)
+// And corresponding stack frame sizes will also be recorded:
+//      sizes[0]        16
+//      sizes[1]        16
+// (Stack frame sizes of 16 above are just for illustration purposes.)
+// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
+// be identified.
+//
+// This routine may return fewer stack frame entries than are
+// available. Also note that "result" and "sizes" must both be non-null.
+extern int GetStackFrames(void** result, int* sizes, int max_depth,
+                          int skip_count);
+
+// Same as above, but to be used from a signal handler. The "uc" parameter
+// should be the pointer to ucontext_t which was passed as the 3rd parameter
+// to sa_sigaction signal handler. It may help the unwinder to get a
+// better stack trace under certain conditions. The "uc" may safely be null.
+//
+// If min_dropped_frames is not null, stores in *min_dropped_frames a
+// lower bound on the number of dropped stack frames. The stored value is
+// guaranteed to be >= 0. The number of real stack frames is guaranteed to
+// be >= skip_count + max_depth + *min_dropped_frames.
+extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
+                                     int skip_count, const void* uc,
+                                     int* min_dropped_frames);
+
+// This is similar to the absl::GetStackFrames routine, except that it returns
+// the stack trace only, and not the stack frame sizes as well.
+// Example:
+//      main() { foo(); }
+//      foo() { bar(); }
+//      bar() {
+//        void* result[10];
+//        int depth = absl::GetStackTrace(result, 10, 1);
+//      }
+//
+// This produces:
+//      result[0]       foo
+//      result[1]       main
+//           ....       ...
+//
+// "result" must not be null.
+extern int GetStackTrace(void** result, int max_depth, int skip_count);
+
+// Same as above, but to be used from a signal handler. The "uc" parameter
+// should be the pointer to ucontext_t which was passed as the 3rd parameter
+// to sa_sigaction signal handler. It may help the unwinder to get a
+// better stack trace under certain conditions. The "uc" may safely be null.
+//
+// If min_dropped_frames is not null, stores in *min_dropped_frames a
+// lower bound on the number of dropped stack frames. The stored value is
+// guaranteed to be >= 0. The number of real stack frames is guaranteed to
+// be >= skip_count + max_depth + *min_dropped_frames.
+extern int GetStackTraceWithContext(void** result, int max_depth,
+                                    int skip_count, const void* uc,
+                                    int* min_dropped_frames);
+
+// Call this to provide a custom function for unwinding stack frames
+// that will be used every time someone invokes one of the static
+// GetStack{Frames,Trace}{,WithContext}() functions above.
+//
+// The arguments passed to the unwinder function will match the
+// arguments passed to absl::GetStackFramesWithContext() except that sizes
+// will be non-null iff the caller is interested in frame sizes.
+//
+// If unwinder is null, we revert to the default stack-tracing behavior.
+//
+// ****************************************************************
+//                        WARNINGS
+//
+// absl::SetStackUnwinder is not suitable for general purpose use. It is
+// provided for custom runtimes.
+// Some things to watch out for when calling absl::SetStackUnwinder:
+//
+// (a) The unwinder may be called from within signal handlers and
+//     therefore must be async-signal-safe.
+//
+// (b) Even after a custom stack unwinder has been unregistered, other
+//     threads may still be in the process of using that unwinder.
+//     Therefore do not clean up any state that may be needed by an old
+//     unwinder.
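A sketch of what a conforming registration might look like; NoopUnwinder is a hypothetical custom runtime's unwinder and must obey the async-signal-safety caveats above:

    #include "absl/debugging/stacktrace.h"

    // Records no frames; async-signal-safe because it touches no heap or locks.
    int NoopUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
                     const void* uc, int* min_dropped_frames) {
      static_cast<void>(pcs);
      static_cast<void>(sizes);
      static_cast<void>(max_depth);
      static_cast<void>(skip_count);
      static_cast<void>(uc);
      if (min_dropped_frames != nullptr) *min_dropped_frames = 0;
      return 0;
    }

    void InstallNoopUnwinder() { absl::SetStackUnwinder(&NoopUnwinder); }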
+// ****************************************************************
+extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
+                                             int max_depth, int skip_count,
+                                             const void* uc,
+                                             int* min_dropped_frames));
+
+// Function that exposes built-in stack-unwinding behavior, ignoring
+// any calls to absl::SetStackUnwinder().
+//
+// pcs must NOT be null.
+//
+// sizes may be null.
+// uc may be null.
+// min_dropped_frames may be null.
+//
+// The semantics are the same as the corresponding GetStack*() function in the
+// case where absl::SetStackUnwinder() was never called. Equivalents are:
+//
+//                      null sizes                 |    non-nullptr sizes
+//  |==========================================================================|
+//  null uc      | GetStackTrace()            | GetStackFrames()               |
+//  non-null uc  | GetStackTraceWithContext() | GetStackFramesWithContext()    |
+//  |==========================================================================|
+extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
+                                int skip_count, const void* uc,
+                                int* min_dropped_frames);
+
+}  // namespace absl
+
+#endif  // ABSL_DEBUGGING_STACKTRACE_H_
diff --git a/absl/memory/BUILD.bazel b/absl/memory/BUILD.bazel
new file mode 100644
index 000000000..91fc55fc4
--- /dev/null
+++ b/absl/memory/BUILD.bazel
@@ -0,0 +1,47 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_TEST_COPTS",
+)
+load(
+    "//absl:test_dependencies.bzl",
+    "GUNIT_MAIN_DEPS_SELECTOR",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache 2.0
+
+cc_library(
+    name = "memory",
+    hdrs = ["memory.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = ["//absl/meta:type_traits"],
+)
+
+cc_test(
+    name = "memory_test",
+    srcs = ["memory_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":memory",
+        "//absl/base",
+        "//absl/base:core_headers",
+    ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
diff --git a/absl/memory/README.md b/absl/memory/README.md
new file mode 100644
index 000000000..72eddd9c0
--- /dev/null
+++ b/absl/memory/README.md
@@ -0,0 +1,22 @@
+# ABSL Memory
+
+This directory contains packages related to abstractions for managing memory
+within objects.
+
+## Library Listing
+
+Only one library target exists within this directory at this time:
+
+* **memory** (`//absl/memory:memory`) provides classes and
+  utility functions for managing memory associated with pointers.
+
+
+## Memory Library File Listing
+
+The following header files are directly included within the
+`absl::memory` library:
+
+### Smart Pointer Management
+
+* `memory.h`
+  Pointer memory management abstractions for handling unique pointers
diff --git a/absl/memory/memory.h b/absl/memory/memory.h
new file mode 100644
index 000000000..c67996084
--- /dev/null
+++ b/absl/memory/memory.h
@@ -0,0 +1,622 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: memory.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains utility functions for managing the creation and
+// conversion of smart pointers. This file is an extension to the C++
+// standard library `<memory>` header file.
+
+#ifndef ABSL_MEMORY_MEMORY_H_
+#define ABSL_MEMORY_MEMORY_H_
+
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+// -----------------------------------------------------------------------------
+// Function Template: WrapUnique()
+// -----------------------------------------------------------------------------
+//
+// Transfers ownership of a raw pointer to a `std::unique_ptr`. The returned
+// value is a `std::unique_ptr` of deduced type.
+//
+// Example:
+//   X* NewX(int, int);
+//   auto x = WrapUnique(NewX(1, 2));  // 'x' is std::unique_ptr<X>.
+//
+// `absl::WrapUnique` is useful for capturing the output of a raw pointer
+// factory. However, prefer 'absl::make_unique<T>(args...)' over
+// 'absl::WrapUnique(new T(args...))'.
+//
+//   auto x = WrapUnique(new X(1, 2));  // works, but nonideal.
+//   auto x = make_unique<X>(1, 2);     // safer, standard, avoids raw 'new'.
+//
+// Note that `absl::WrapUnique(p)` is valid only if `delete p` is a valid
+// expression. In particular, `absl::WrapUnique()` cannot wrap pointers to
+// arrays, functions or void, and it must not be used to capture pointers
+// obtained from array-new expressions (even though that would compile!).
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  static_assert(!std::is_array<T>::value, "array types are unsupported");
+  static_assert(std::is_object<T>::value, "non-object types are unsupported");
+  return std::unique_ptr<T>(ptr);
+}
+
+namespace memory_internal {
+
+// Traits to select proper overload and return type for `absl::make_unique<>`.
+template <typename T>
+struct MakeUniqueResult {
+  using scalar = std::unique_ptr<T>;
+};
+template <typename T>
+struct MakeUniqueResult<T[]> {
+  using array = std::unique_ptr<T[]>;
+};
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+  using invalid = void;
+};
+
+}  // namespace memory_internal
+
+// -----------------------------------------------------------------------------
+// Function Template: make_unique()
+// -----------------------------------------------------------------------------
+//
+// Creates a `std::unique_ptr<>`, while avoiding issues creating temporaries
+// during the construction process. `absl::make_unique<>` also avoids redundant
+// type declarations, by avoiding the need to explicitly use the `new` operator.
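For instance, a hedged illustration (Widget stands in for any user-defined type):

    #include "absl/memory/memory.h"

    struct Widget {
      Widget(int a, int b) : x(a + b) {}
      int x;
    };

    // Equivalent to std::unique_ptr<Widget>(new Widget(1, 2)), with no raw
    // 'new' at the call site.
    std::unique_ptr<Widget> w = absl::make_unique<Widget>(1, 2);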
+//
+// This implementation of `absl::make_unique<>` is designed for C++11 code and
+// will be replaced in C++14 by the equivalent `std::make_unique<>` abstraction.
+// `absl::make_unique<>` is designed to be 100% compatible with
+// `std::make_unique<>` so that the eventual migration will involve a simple
+// rename operation.
+//
+// For more background on why `std::unique_ptr<T>(new T(a,b))` is problematic,
+// see Herb Sutter's explanation on
+// [Exception-Safe Function Calls](http://herbsutter.com/gotw/_102/).
+// (In general, reviewers should treat `new T(a,b)` with scrutiny.)
+//
+// Example usage:
+//
+//    auto p = make_unique<X>(args...);  // 'p' is a std::unique_ptr<X>
+//    auto pa = make_unique<X[]>(5);     // 'pa' is a std::unique_ptr<X[]>
+//
+// Three overloads of `absl::make_unique` are required:
+//
+//   - For non-array T:
+//
+//       Allocates a T with `new T(std::forward<Args>(args)...)`,
+//       forwarding all `args` to T's constructor.
+//       Returns a `std::unique_ptr<T>` owning that object.
+//
+//   - For an array of unknown bounds T[]:
+//
+//       `absl::make_unique<>` will allocate an array T of type U[] with
+//       `new U[n]()` and return a `std::unique_ptr<U[]>` owning that array.
+//
+//       Note that 'U[n]()' is different from 'U[n]', and elements will be
+//       value-initialized. Note as well that `std::unique_ptr` will perform
+//       its own destruction of the array elements upon leaving scope, even
+//       though the array [] does not have a default destructor.
+//
+//       NOTE: an array of unknown bounds T[] may still be (and often will be)
+//       initialized to have a size, and will still use this overload. E.g:
+//
+//         auto my_array = absl::make_unique<int[]>(10);
+//
+//   - For an array of known bounds T[N]:
+//
+//       `absl::make_unique<>` is deleted (like with `std::make_unique<>`) as
+//       this overload is not useful.
+//
+//       NOTE: an array of known bounds T[N] is not considered a useful
+//       construction, and may cause undefined behavior in templates. E.g:
+//
+//         auto my_array = absl::make_unique<int[10]>();
+//
+//       In those cases, of course, you can still use the overload above and
+//       simply initialize it to its desired size:
+//
+//         auto my_array = absl::make_unique<int[]>(10);
+
+// `absl::make_unique` overload for non-array types.
+template <typename T, typename... Args>
+typename memory_internal::MakeUniqueResult<T>::scalar make_unique(
+    Args&&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// `absl::make_unique` overload for an array T[] of unknown bounds.
+// The array allocation needs to use the `new T[size]` form and cannot take
+// element constructor arguments. The `std::unique_ptr` will manage destructing
+// these array elements.
+template <typename T>
+typename memory_internal::MakeUniqueResult<T>::array make_unique(size_t n) {
+  return std::unique_ptr<T>(new typename absl::remove_extent_t<T>[n]());
+}
+
+// `absl::make_unique` overload for an array T[N] of known bounds.
+// This construction will be rejected.
+template <typename T, typename... Args>
+typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
+    Args&&... /* args */) = delete;
+
+// -----------------------------------------------------------------------------
+// Function Template: RawPtr()
+// -----------------------------------------------------------------------------
+//
+// Extracts the raw pointer from a pointer-like 'ptr'. `absl::RawPtr` is useful
+// within templates that need to handle a complement of raw pointers,
+// `std::nullptr_t`, and smart pointers.
+template <typename T>
+auto RawPtr(T&& ptr) -> decltype(&*ptr) {
+  // ptr is a forwarding reference to support Ts with non-const operators.
+  return (ptr != nullptr) ? &*ptr : nullptr;
+}
+inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
+
+// -----------------------------------------------------------------------------
+// Function Template: ShareUniquePtr()
+// -----------------------------------------------------------------------------
+//
+// Transforms a `std::unique_ptr` rvalue into a `std::shared_ptr`. The returned
+// value is a `std::shared_ptr` of deduced type and ownership is transferred to
+// the shared pointer.
+//
+// Example:
+//
+//     auto up = absl::make_unique<int>(10);
+//     auto sp = absl::ShareUniquePtr(std::move(up));  // shared_ptr<int>
+//     CHECK_EQ(*sp, 10);
+//     CHECK(up == nullptr);
+//
+// Note that this conversion is correct even when T is an array type, although
+// the resulting shared pointer may not be very useful.
+//
+// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which
+// a null shared pointer does not attempt to call the deleter.
+template <typename T, typename D>
+std::shared_ptr<T> ShareUniquePtr(std::unique_ptr<T, D>&& ptr) {
+  return ptr ? std::shared_ptr<T>(std::move(ptr)) : std::shared_ptr<T>();
+}
+
+// -----------------------------------------------------------------------------
+// Function Template: WeakenPtr()
+// -----------------------------------------------------------------------------
+//
+// Creates a weak pointer associated with a given shared pointer. The returned
+// value is a `std::weak_ptr` of deduced type.
+//
+// Example:
+//
+//    auto sp = std::make_shared<int>(10);
+//    auto wp = absl::WeakenPtr(sp);
+//    CHECK_EQ(sp.get(), wp.lock().get());
+//    sp.reset();
+//    CHECK(wp.lock() == nullptr);
+//
+template <typename T>
+std::weak_ptr<T> WeakenPtr(const std::shared_ptr<T>& ptr) {
+  return std::weak_ptr<T>(ptr);
+}
+
+namespace memory_internal {
+
+// ExtractOr<E, O, D>::type evaluates to E<O> if possible. Otherwise, D.
+template