- ed0ba496fe01eb8edfa86beade8a37768e7c12ef Updates the API for Exception Safety testing to use build... by Abseil Team <absl-team@google.com>
- c4b7a4e517c9404932c45f2f9f92eb7dc694e45d Internal change by Abseil Team <absl-team@google.com>
- 76c78ed9385f65d881511645446e0bb8ababf6ec Add missing ABSL_PREDICT_FALSE to one of FixedArray::at()... by Abseil Team <absl-team@google.com>
- 1204fb1c46f007dd9dfb7d9abf3e96c58835d193 Internal change. by Greg Falcon <gfalcon@google.com>
- f1f47c98a026bc5e425ae83ff4a2eb391bbd3d9b Add internal-only functionality to examine the stack, to ... by Derek Mauro <dmauro@google.com>
- 30d63097cd268d912f917526f6511005580465c4 fix typo by Abseil Team <absl-team@google.com>
- 942d7efa6cf54cd248ca57dcaf3c245188b52a76 Remove unnecessary semicolons from comment examples. by Abseil Team <absl-team@google.com>
- 7db0669cf23a06d934d3ed8c76aee4e4e23b7e04 Remove malloc_hook and malloc_extension from our internal... by Greg Falcon <gfalcon@google.com>
- 0190f1063d101b1ded355019df2e1d325931f6c7 Make the maximum length of a string view equal difference... by Abseil Team <absl-team@google.com>
- c8ae37cbce29449b02115a0ebd435ddc3d7ab062 Add namespace qualification. by Shaindel Schwartz <shaindel@google.com>
- ff70afe2e6e3dd39f51ce9829e3e1f18231bf4d7 Fix internal/direct_mmap.h for non-linux builds. by Greg Falcon <gfalcon@google.com>

GitOrigin-RevId: ed0ba496fe01eb8edfa86beade8a37768e7c12ef
Change-Id: I7595ee3480d1d6724fd3797c15ba9d9be0d17e62
parent a7e522daf1
commit 5b53540166
35 changed files with 831 additions and 2601 deletions
@@ -77,42 +77,16 @@ cc_library(
|
|||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "malloc_extension",
|
||||
srcs = ["internal/malloc_extension.cc"],
|
||||
hdrs = [
|
||||
"internal/malloc_extension.h",
|
||||
],
|
||||
copts = ABSL_DEFAULT_COPTS,
|
||||
visibility = [
|
||||
"//absl:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
":core_headers",
|
||||
":dynamic_annotations",
|
||||
],
|
||||
)
|
||||
|
||||
# malloc_extension feels like it wants to be folded into this target, but
|
||||
# malloc_internal gets special build treatment to compile at -O3, so these
|
||||
# need to stay separate.
|
||||
cc_library(
|
||||
name = "malloc_internal",
|
||||
srcs = [
|
||||
"internal/low_level_alloc.cc",
|
||||
"internal/malloc_hook.cc",
|
||||
"internal/malloc_hook_mmap_linux.inc",
|
||||
],
|
||||
hdrs = [
|
||||
"internal/direct_mmap.h",
|
||||
"internal/low_level_alloc.h",
|
||||
"internal/malloc_hook.h",
|
||||
"internal/malloc_hook_c.h",
|
||||
],
|
||||
copts = ABSL_DEFAULT_COPTS,
|
||||
textual_hdrs = [
|
||||
"internal/malloc_hook_invoke.h",
|
||||
],
|
||||
visibility = [
|
||||
"//absl:__subpackages__",
|
||||
],
|
||||
|
@@ -419,26 +393,3 @@ cc_test(
|
|||
"@com_google_googletest//:gtest_main",
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "malloc_extension_system_malloc_test",
|
||||
size = "small",
|
||||
srcs = ["internal/malloc_extension_test.cc"],
|
||||
copts = select({
|
||||
"//absl:windows": [
|
||||
"/DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
|
||||
],
|
||||
"//conditions:default": [
|
||||
"-DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1",
|
||||
],
|
||||
}) + ABSL_TEST_COPTS,
|
||||
features = [
|
||||
# This test can't be run under lsan because the test requires system
|
||||
# malloc, and lsan provides a competing malloc implementation.
|
||||
"-leak_sanitize",
|
||||
],
|
||||
deps = [
|
||||
":malloc_extension",
|
||||
"@com_google_googletest//:gtest_main",
|
||||
],
|
||||
)
@@ -41,10 +41,6 @@ list(APPEND BASE_INTERNAL_HEADERS
|
|||
"internal/inline_variable.h"
|
||||
"internal/low_level_alloc.h"
|
||||
"internal/low_level_scheduling.h"
|
||||
"internal/malloc_extension.h"
|
||||
"internal/malloc_hook_c.h"
|
||||
"internal/malloc_hook.h"
|
||||
"internal/malloc_hook_invoke.h"
|
||||
"internal/per_thread_tls.h"
|
||||
"internal/pretty_function.h"
|
||||
"internal/raw_logging.h"
|
||||
|
@@ -69,7 +65,6 @@ list(APPEND BASE_SRC
|
|||
"internal/thread_identity.cc"
|
||||
"internal/unscaledcycleclock.cc"
|
||||
"internal/low_level_alloc.cc"
|
||||
"internal/malloc_hook.cc"
|
||||
${BASE_PUBLIC_HEADERS}
|
||||
${BASE_INTERNAL_HEADERS}
|
||||
)
|
||||
|
@@ -86,21 +81,6 @@ absl_library(
|
|||
base
|
||||
)
|
||||
|
||||
# malloc extension library
|
||||
set(MALLOC_EXTENSION_SRC "internal/malloc_extension.cc")
|
||||
set(MALLOC_EXTENSION_PUBLIC_LIBRARIES absl::base)
|
||||
|
||||
absl_library(
|
||||
TARGET
|
||||
absl_malloc_extension
|
||||
SOURCES
|
||||
${MALLOC_EXTENSION_SRC}
|
||||
PUBLIC_LIBRARIES
|
||||
${MALLOC_EXTENSION_PUBLIC_LIBRARIES}
|
||||
EXPORT_NAME
|
||||
malloc_extension
|
||||
)
|
||||
|
||||
# throw delegate library
|
||||
set(THROW_DELEGATE_SRC "internal/throw_delegate.cc")
|
||||
|
||||
|
@@ -165,8 +145,6 @@ absl_library(
|
|||
# malloc_internal library
|
||||
list(APPEND MALLOC_INTERNAL_SRC
|
||||
"internal/low_level_alloc.cc"
|
||||
"internal/malloc_hook.cc"
|
||||
"internal/malloc_hook_mmap_linux.inc"
|
||||
)
|
||||
|
||||
absl_library(
|
||||
|
@@ -378,23 +356,3 @@ absl_test(
|
|||
PRIVATE_COMPILE_FLAGS
|
||||
${ABSL_EXCEPTIONS_FLAG}
|
||||
)
|
||||
|
||||
# test absl_malloc_extension_system_malloc_test
|
||||
set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_SRC "internal/malloc_extension_test.cc")
|
||||
set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PUBLIC_LIBRARIES absl::base absl_malloc_extension)
|
||||
set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PRIVATE_COMPILE_FLAGS "-DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1")
|
||||
|
||||
absl_test(
|
||||
TARGET
|
||||
absl_malloc_extension_system_malloc_test
|
||||
SOURCES
|
||||
${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_SRC}
|
||||
PUBLIC_LIBRARIES
|
||||
${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PUBLIC_LIBRARIES}
|
||||
PRIVATE_COMPILE_FLAGS
|
||||
${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PRIVATE_COMPILE_FLAGS}
|
||||
)
@@ -20,7 +20,6 @@
|
|||
// This header file defines casting templates to fit use cases not covered by
|
||||
// the standard casts provided in the C++ standard. As with all cast operations,
|
||||
// use these with caution and only if alternatives do not exist.
|
||||
//
|
||||
|
||||
#ifndef ABSL_BASE_CASTS_H_
|
||||
#define ABSL_BASE_CASTS_H_
|
||||
|
@@ -122,7 +121,7 @@ inline To implicit_cast(typename absl::internal::identity_t<To> to) {
|
|||
// object in memory has one type, and a program accesses it with a different
|
||||
// type, the result is undefined behavior for most values of "different type".
|
||||
//
|
||||
// Such casting results is type punning: holding an object in memory of one type
|
||||
// Such casting results in type punning: holding an object in memory of one type
|
||||
// and reading its bits back using a different type. A `bit_cast()` avoids this
|
||||
// issue by implementating its casts using `memcpy()`, which avoids introducing
|
||||
// this undefined behavior.
|
@@ -402,29 +402,158 @@ TEST_F(ThrowingAllocatorTest, InList) {
|
|||
for (int i = 0; i < 20; ++i) l.pop_front();
|
||||
}
|
||||
|
||||
struct CallOperator {
|
||||
template <typename TesterInstance, typename = void>
|
||||
struct NullaryTestValidator : public std::false_type {};
|
||||
|
||||
template <typename TesterInstance>
|
||||
struct NullaryTestValidator<
|
||||
TesterInstance,
|
||||
absl::void_t<decltype(std::declval<TesterInstance>().Test())>>
|
||||
: public std::true_type {};
|
||||
|
||||
template <typename TesterInstance>
|
||||
bool HasNullaryTest(const TesterInstance&) {
|
||||
return NullaryTestValidator<TesterInstance>::value;
|
||||
}
|
||||
|
||||
void DummyOp(void*) {}
|
||||
|
||||
template <typename TesterInstance, typename = void>
|
||||
struct UnaryTestValidator : public std::false_type {};
|
||||
|
||||
template <typename TesterInstance>
|
||||
struct UnaryTestValidator<
|
||||
TesterInstance,
|
||||
absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>>
|
||||
: public std::true_type {};
|
||||
|
||||
template <typename TesterInstance>
|
||||
bool HasUnaryTest(const TesterInstance&) {
|
||||
return UnaryTestValidator<TesterInstance>::value;
|
||||
}
|
||||
|
||||
TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) {
|
||||
using T = exceptions_internal::UninitializedT;
|
||||
auto op = [](T* t) {};
|
||||
auto inv = [](T*) { return testing::AssertionSuccess(); };
|
||||
auto fac = []() { return absl::make_unique<T>(); };
|
||||
|
||||
// Test that providing operation and inveriants still does not allow for the
|
||||
// the invocation of .Test() and .Test(op) because it lacks a factory
|
||||
auto without_fac =
|
||||
absl::MakeExceptionSafetyTester().WithOperation(op).WithInvariants(
|
||||
inv, absl::strong_guarantee);
|
||||
EXPECT_FALSE(HasNullaryTest(without_fac));
|
||||
EXPECT_FALSE(HasUnaryTest(without_fac));
|
||||
|
||||
// Test that providing invariants and factory allows the invocation of
|
||||
// .Test(op) but does not allow for .Test() because it lacks an operation
|
||||
auto without_op = absl::MakeExceptionSafetyTester()
|
||||
.WithInvariants(inv, absl::strong_guarantee)
|
||||
.WithFactory(fac);
|
||||
EXPECT_FALSE(HasNullaryTest(without_op));
|
||||
EXPECT_TRUE(HasUnaryTest(without_op));
|
||||
|
||||
// Test that providing operation and factory still does not allow for the
|
||||
// the invocation of .Test() and .Test(op) because it lacks invariants
|
||||
auto without_inv =
|
||||
absl::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac);
|
||||
EXPECT_FALSE(HasNullaryTest(without_inv));
|
||||
EXPECT_FALSE(HasUnaryTest(without_inv));
|
||||
}
|
||||
|
||||
struct ExampleStruct {};
|
||||
|
||||
std::unique_ptr<ExampleStruct> ExampleFunctionFactory() {
|
||||
return absl::make_unique<ExampleStruct>();
|
||||
}
|
||||
|
||||
void ExampleFunctionOperation(ExampleStruct*) {}
|
||||
|
||||
testing::AssertionResult ExampleFunctionInvariant(ExampleStruct*) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
|
||||
struct {
|
||||
std::unique_ptr<ExampleStruct> operator()() const {
|
||||
return ExampleFunctionFactory();
|
||||
}
|
||||
} example_struct_factory;
|
||||
|
||||
struct {
|
||||
void operator()(ExampleStruct*) const {}
|
||||
} example_struct_operation;
|
||||
|
||||
struct {
|
||||
testing::AssertionResult operator()(ExampleStruct* example_struct) const {
|
||||
return ExampleFunctionInvariant(example_struct);
|
||||
}
|
||||
} example_struct_invariant;
|
||||
|
||||
auto example_lambda_factory = []() { return ExampleFunctionFactory(); };
|
||||
|
||||
auto example_lambda_operation = [](ExampleStruct*) {};
|
||||
|
||||
auto example_lambda_invariant = [](ExampleStruct* example_struct) {
|
||||
return ExampleFunctionInvariant(example_struct);
|
||||
};
|
||||
|
||||
// Testing that function references, pointers, structs with operator() and
|
||||
// lambdas can all be used with ExceptionSafetyTester
|
||||
TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) {
|
||||
// function reference
|
||||
EXPECT_TRUE(absl::MakeExceptionSafetyTester()
|
||||
.WithFactory(ExampleFunctionFactory)
|
||||
.WithOperation(ExampleFunctionOperation)
|
||||
.WithInvariants(ExampleFunctionInvariant)
|
||||
.Test());
|
||||
|
||||
// function pointer
|
||||
EXPECT_TRUE(absl::MakeExceptionSafetyTester()
|
||||
.WithFactory(&ExampleFunctionFactory)
|
||||
.WithOperation(&ExampleFunctionOperation)
|
||||
.WithInvariants(&ExampleFunctionInvariant)
|
||||
.Test());
|
||||
|
||||
// struct
|
||||
EXPECT_TRUE(absl::MakeExceptionSafetyTester()
|
||||
.WithFactory(example_struct_factory)
|
||||
.WithOperation(example_struct_operation)
|
||||
.WithInvariants(example_struct_invariant)
|
||||
.Test());
|
||||
|
||||
// lambda
|
||||
EXPECT_TRUE(absl::MakeExceptionSafetyTester()
|
||||
.WithFactory(example_lambda_factory)
|
||||
.WithOperation(example_lambda_operation)
|
||||
.WithInvariants(example_lambda_invariant)
|
||||
.Test());
|
||||
}
|
||||
|
||||
struct NonNegative {
|
||||
bool operator==(const NonNegative& other) const { return i == other.i; }
|
||||
int i;
|
||||
};
|
||||
|
||||
testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) {
|
||||
if (g->i >= 0) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
return testing::AssertionFailure()
|
||||
<< "i should be non-negative but is " << g->i;
|
||||
}
|
||||
|
||||
struct {
|
||||
template <typename T>
|
||||
void operator()(T* t) const {
|
||||
(*t)();
|
||||
}
|
||||
};
|
||||
} invoker;
|
||||
|
||||
struct NonNegative {
|
||||
friend testing::AssertionResult AbslCheckInvariants(
|
||||
NonNegative* g, absl::InternalAbslNamespaceFinder) {
|
||||
if (g->i >= 0) return testing::AssertionSuccess();
|
||||
return testing::AssertionFailure()
|
||||
<< "i should be non-negative but is " << g->i;
|
||||
}
|
||||
bool operator==(const NonNegative& other) const { return i == other.i; }
|
||||
|
||||
int i;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct DefaultFactory {
|
||||
std::unique_ptr<T> operator()() const { return absl::make_unique<T>(); }
|
||||
};
|
||||
auto tester =
|
||||
absl::MakeExceptionSafetyTester().WithOperation(invoker).WithInvariants(
|
||||
CheckNonNegativeInvariants);
|
||||
auto strong_tester = tester.WithInvariants(absl::strong_guarantee);
|
||||
|
||||
struct FailsBasicGuarantee : public NonNegative {
|
||||
void operator()() {
|
||||
|
@@ -435,8 +564,7 @@ struct FailsBasicGuarantee : public NonNegative {
|
|||
};
|
||||
|
||||
TEST(ExceptionCheckTest, BasicGuaranteeFailure) {
|
||||
EXPECT_FALSE(TestExceptionSafety(DefaultFactory<FailsBasicGuarantee>(),
|
||||
CallOperator{}));
|
||||
EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test());
|
||||
}
|
||||
|
||||
struct FollowsBasicGuarantee : public NonNegative {
|
||||
|
@@ -447,22 +575,12 @@ struct FollowsBasicGuarantee : public NonNegative {
|
|||
};
|
||||
|
||||
TEST(ExceptionCheckTest, BasicGuarantee) {
|
||||
EXPECT_TRUE(TestExceptionSafety(DefaultFactory<FollowsBasicGuarantee>(),
|
||||
CallOperator{}));
|
||||
EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
|
||||
}
|
||||
|
||||
TEST(ExceptionCheckTest, StrongGuaranteeFailure) {
|
||||
{
|
||||
DefaultFactory<FailsBasicGuarantee> factory;
|
||||
EXPECT_FALSE(
|
||||
TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
|
||||
}
|
||||
|
||||
{
|
||||
DefaultFactory<FollowsBasicGuarantee> factory;
|
||||
EXPECT_FALSE(
|
||||
TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
|
||||
}
|
||||
EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test());
|
||||
EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
|
||||
}
|
||||
|
||||
struct BasicGuaranteeWithExtraInvariants : public NonNegative {
|
||||
|
@@ -479,20 +597,21 @@ struct BasicGuaranteeWithExtraInvariants : public NonNegative {
|
|||
constexpr int BasicGuaranteeWithExtraInvariants::kExceptionSentinel;
|
||||
|
||||
TEST(ExceptionCheckTest, BasicGuaranteeWithInvariants) {
|
||||
DefaultFactory<BasicGuaranteeWithExtraInvariants> factory;
|
||||
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
|
||||
|
||||
EXPECT_TRUE(TestExceptionSafety(
|
||||
factory, CallOperator{}, [](BasicGuaranteeWithExtraInvariants* w) {
|
||||
if (w->i == BasicGuaranteeWithExtraInvariants::kExceptionSentinel) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
return testing::AssertionFailure()
|
||||
<< "i should be "
|
||||
<< BasicGuaranteeWithExtraInvariants::kExceptionSentinel
|
||||
<< ", but is " << w->i;
|
||||
}));
|
||||
auto tester_with_val =
|
||||
tester.WithInitialValue(BasicGuaranteeWithExtraInvariants{});
|
||||
EXPECT_TRUE(tester_with_val.Test());
|
||||
EXPECT_TRUE(
|
||||
tester_with_val
|
||||
.WithInvariants([](BasicGuaranteeWithExtraInvariants* w) {
|
||||
if (w->i == BasicGuaranteeWithExtraInvariants::kExceptionSentinel) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
return testing::AssertionFailure()
|
||||
<< "i should be "
|
||||
<< BasicGuaranteeWithExtraInvariants::kExceptionSentinel
|
||||
<< ", but is " << w->i;
|
||||
})
|
||||
.Test());
|
||||
}
|
||||
|
||||
struct FollowsStrongGuarantee : public NonNegative {
|
||||
|
@@ -500,10 +619,8 @@ struct FollowsStrongGuarantee : public NonNegative {
|
|||
};
|
||||
|
||||
TEST(ExceptionCheckTest, StrongGuarantee) {
|
||||
DefaultFactory<FollowsStrongGuarantee> factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
|
||||
EXPECT_TRUE(
|
||||
TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
|
||||
EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
|
||||
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
|
||||
}
|
||||
|
||||
struct HasReset : public NonNegative {
|
||||
|
@@ -514,38 +631,36 @@ struct HasReset : public NonNegative {
|
|||
}
|
||||
|
||||
void reset() { i = 0; }
|
||||
|
||||
friend bool AbslCheckInvariants(HasReset* h,
|
||||
absl::InternalAbslNamespaceFinder) {
|
||||
h->reset();
|
||||
return h->i == 0;
|
||||
}
|
||||
};
|
||||
|
||||
testing::AssertionResult CheckHasResetInvariants(HasReset* h) {
|
||||
h->reset();
|
||||
return testing::AssertionResult(h->i == 0);
|
||||
}
|
||||
|
||||
TEST(ExceptionCheckTest, ModifyingChecker) {
|
||||
{
|
||||
DefaultFactory<FollowsBasicGuarantee> factory;
|
||||
EXPECT_FALSE(TestExceptionSafety(
|
||||
factory, CallOperator{},
|
||||
[](FollowsBasicGuarantee* g) {
|
||||
g->i = 1000;
|
||||
return true;
|
||||
},
|
||||
[](FollowsBasicGuarantee* g) { return g->i == 1000; }));
|
||||
}
|
||||
{
|
||||
DefaultFactory<FollowsStrongGuarantee> factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{},
|
||||
[](FollowsStrongGuarantee* g) {
|
||||
++g->i;
|
||||
return true;
|
||||
},
|
||||
StrongGuarantee(factory)));
|
||||
}
|
||||
{
|
||||
DefaultFactory<HasReset> factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
|
||||
}
|
||||
auto set_to_1000 = [](FollowsBasicGuarantee* g) {
|
||||
g->i = 1000;
|
||||
return testing::AssertionSuccess();
|
||||
};
|
||||
auto is_1000 = [](FollowsBasicGuarantee* g) {
|
||||
return testing::AssertionResult(g->i == 1000);
|
||||
};
|
||||
auto increment = [](FollowsStrongGuarantee* g) {
|
||||
++g->i;
|
||||
return testing::AssertionSuccess();
|
||||
};
|
||||
|
||||
EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{})
|
||||
.WithInvariants(set_to_1000, is_1000)
|
||||
.Test());
|
||||
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{})
|
||||
.WithInvariants(increment)
|
||||
.Test());
|
||||
EXPECT_TRUE(absl::MakeExceptionSafetyTester()
|
||||
.WithInitialValue(HasReset{})
|
||||
.WithInvariants(CheckHasResetInvariants)
|
||||
.Test(invoker));
|
||||
}
|
||||
|
||||
struct NonCopyable : public NonNegative {
|
||||
|
@@ -556,10 +671,9 @@ struct NonCopyable : public NonNegative {
|
|||
};
|
||||
|
||||
TEST(ExceptionCheckTest, NonCopyable) {
|
||||
DefaultFactory<NonCopyable> factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
|
||||
EXPECT_TRUE(
|
||||
TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
|
||||
auto factory = []() { return absl::make_unique<NonCopyable>(); };
|
||||
EXPECT_TRUE(tester.WithFactory(factory).Test());
|
||||
EXPECT_TRUE(strong_tester.WithFactory(factory).Test());
|
||||
}
|
||||
|
||||
struct NonEqualityComparable : public NonNegative {
|
||||
|
@@ -574,15 +688,15 @@ struct NonEqualityComparable : public NonNegative {
|
|||
};
|
||||
|
||||
TEST(ExceptionCheckTest, NonEqualityComparable) {
|
||||
DefaultFactory<NonEqualityComparable> factory;
|
||||
auto comp = [](const NonEqualityComparable& a,
|
||||
const NonEqualityComparable& b) { return a.i == b.i; };
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
|
||||
EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{},
|
||||
absl::StrongGuarantee(factory, comp)));
|
||||
EXPECT_FALSE(TestExceptionSafety(
|
||||
factory, [&](NonEqualityComparable* n) { n->ModifyOnThrow(); },
|
||||
absl::StrongGuarantee(factory, comp)));
|
||||
auto nec_is_strong = [](NonEqualityComparable* nec) {
|
||||
return testing::AssertionResult(nec->i == NonEqualityComparable().i);
|
||||
};
|
||||
auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{})
|
||||
.WithInvariants(nec_is_strong);
|
||||
|
||||
EXPECT_TRUE(strong_nec_tester.Test());
|
||||
EXPECT_FALSE(strong_nec_tester.Test(
|
||||
[](NonEqualityComparable* n) { n->ModifyOnThrow(); }));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
|
@@ -604,28 +718,32 @@ struct ExhaustivenessTester {
|
|||
return true;
|
||||
}
|
||||
|
||||
friend testing::AssertionResult AbslCheckInvariants(
|
||||
ExhaustivenessTester*, absl::InternalAbslNamespaceFinder) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
|
||||
static unsigned char successes;
|
||||
};
|
||||
|
||||
struct {
|
||||
template <typename T>
|
||||
testing::AssertionResult operator()(ExhaustivenessTester<T>*) const {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
} CheckExhaustivenessTesterInvariants;
|
||||
|
||||
template <typename T>
|
||||
unsigned char ExhaustivenessTester<T>::successes = 0;
|
||||
|
||||
TEST(ExceptionCheckTest, Exhaustiveness) {
|
||||
DefaultFactory<ExhaustivenessTester<int>> int_factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(int_factory, CallOperator{}));
|
||||
auto exhaust_tester = absl::MakeExceptionSafetyTester()
|
||||
.WithInvariants(CheckExhaustivenessTesterInvariants)
|
||||
.WithOperation(invoker);
|
||||
|
||||
EXPECT_TRUE(
|
||||
exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test());
|
||||
EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
|
||||
|
||||
DefaultFactory<ExhaustivenessTester<ThrowingValue<>>> bomb_factory;
|
||||
EXPECT_TRUE(TestExceptionSafety(bomb_factory, CallOperator{}));
|
||||
EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
|
||||
|
||||
ExhaustivenessTester<ThrowingValue<>>::successes = 0;
|
||||
EXPECT_TRUE(TestExceptionSafety(bomb_factory, CallOperator{},
|
||||
StrongGuarantee(bomb_factory)));
|
||||
EXPECT_TRUE(
|
||||
exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{})
|
||||
.WithInvariants(absl::strong_guarantee)
|
||||
.Test());
|
||||
EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
|
||||
}
@@ -67,7 +67,7 @@ namespace base_internal {
|
|||
// Platform specific logic extracted from
|
||||
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
|
||||
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
|
||||
off64_t offset) __THROW {
|
||||
off64_t offset) noexcept {
|
||||
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
|
||||
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
|
||||
(defined(__PPC__) && !defined(__PPC64__)) || \
|
||||
|
@@ -129,6 +129,9 @@ inline int DirectMunmap(void* start, size_t length) {
|
|||
// For non-linux platforms where we have mmap, just dispatch directly to the
|
||||
// actual mmap()/munmap() methods.
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
|
||||
off_t offset) {
|
||||
return mmap(start, length, prot, flags, fd, offset);
|
||||
|
@@ -138,6 +141,9 @@ inline int DirectMunmap(void* start, size_t length) {
|
|||
return munmap(start, length);
|
||||
}
|
||||
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
#endif // __linux__
|
||||
|
||||
#endif // ABSL_HAVE_MMAP
@@ -18,7 +18,10 @@
|
|||
#include "absl/meta/type_traits.h"
|
||||
|
||||
namespace absl {
|
||||
|
||||
exceptions_internal::NoThrowTag no_throw_ctor;
|
||||
exceptions_internal::StrongGuaranteeTagType strong_guarantee;
|
||||
|
||||
namespace exceptions_internal {
|
||||
|
||||
int countdown = -1;
@@ -23,6 +23,7 @@
|
|||
#include <initializer_list>
|
||||
#include <iosfwd>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
@@ -35,7 +36,6 @@
|
|||
#include "absl/types/optional.h"
|
||||
|
||||
namespace absl {
|
||||
struct InternalAbslNamespaceFinder {};
|
||||
|
||||
struct ConstructorTracker;
|
||||
|
||||
|
@@ -63,6 +63,7 @@ constexpr NoThrow operator&(NoThrow a, NoThrow b) {
|
|||
|
||||
namespace exceptions_internal {
|
||||
struct NoThrowTag {};
|
||||
struct StrongGuaranteeTagType {};
|
||||
|
||||
constexpr bool ThrowingAllowed(NoThrow flags, NoThrow flag) {
|
||||
return !static_cast<bool>(flags & flag);
|
||||
|
@@ -87,8 +88,7 @@ class TestException {
|
|||
// bad_alloc exception in TestExceptionSafety.
|
||||
class TestBadAllocException : public std::bad_alloc, public TestException {
|
||||
public:
|
||||
explicit TestBadAllocException(absl::string_view msg)
|
||||
: TestException(msg) {}
|
||||
explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
|
||||
using TestException::what;
|
||||
};
|
||||
|
||||
|
@@ -128,75 +128,73 @@ class TrackedObject {
|
|||
friend struct ::absl::ConstructorTracker;
|
||||
};
|
||||
|
||||
template <typename Factory>
|
||||
using FactoryType = typename absl::result_of_t<Factory()>::element_type;
|
||||
|
||||
// Returns an optional with the result of the check if op fails, or an empty
|
||||
// optional if op passes
|
||||
template <typename Factory, typename Op, typename Checker>
|
||||
absl::optional<testing::AssertionResult> TestCheckerAtCountdown(
|
||||
Factory factory, const Op& op, int count, const Checker& check) {
|
||||
template <typename Factory, typename Operation, typename Invariant>
|
||||
absl::optional<testing::AssertionResult> TestSingleInvariantAtCountdownImpl(
|
||||
const Factory& factory, const Operation& operation, int count,
|
||||
const Invariant& invariant) {
|
||||
auto t_ptr = factory();
|
||||
absl::optional<testing::AssertionResult> out;
|
||||
absl::optional<testing::AssertionResult> current_res;
|
||||
exceptions_internal::countdown = count;
|
||||
try {
|
||||
exceptions_internal::countdown = count;
|
||||
op(t_ptr.get());
|
||||
operation(t_ptr.get());
|
||||
} catch (const exceptions_internal::TestException& e) {
|
||||
out.emplace(check(t_ptr.get()));
|
||||
if (!*out) {
|
||||
*out << " caused by exception thrown by " << e.what();
|
||||
current_res.emplace(invariant(t_ptr.get()));
|
||||
if (!current_res.value()) {
|
||||
*current_res << e.what() << " failed invariant check";
|
||||
}
|
||||
}
|
||||
return out;
|
||||
exceptions_internal::countdown = -1;
|
||||
return current_res;
|
||||
}
|
||||
|
||||
template <typename Factory, typename Op, typename Checker>
|
||||
int UpdateOut(Factory factory, const Op& op, int count, const Checker& checker,
|
||||
testing::AssertionResult* out) {
|
||||
if (*out) *out = *TestCheckerAtCountdown(factory, op, count, checker);
|
||||
template <typename Factory, typename Operation>
|
||||
absl::optional<testing::AssertionResult> TestSingleInvariantAtCountdownImpl(
|
||||
const Factory& factory, const Operation& operation, int count,
|
||||
StrongGuaranteeTagType) {
|
||||
using TPtr = typename decltype(factory())::pointer;
|
||||
auto t_is_strong = [&](TPtr t) { return *t == *factory(); };
|
||||
return TestSingleInvariantAtCountdownImpl(factory, operation, count,
|
||||
t_is_strong);
|
||||
}
|
||||
|
||||
template <typename Factory, typename Operation, typename Invariant>
|
||||
int TestSingleInvariantAtCountdown(
|
||||
const Factory& factory, const Operation& operation, int count,
|
||||
const Invariant& invariant,
|
||||
absl::optional<testing::AssertionResult>* reduced_res) {
|
||||
// If reduced_res is empty, it means the current call to
|
||||
// TestSingleInvariantAtCountdown(...) is the first test being run so we do
|
||||
// want to run it. Alternatively, if it's not empty (meaning a previous test
|
||||
// has run) we want to check if it passed. If the previous test did pass, we
|
||||
// want to contine running tests so we do want to run the current one. If it
|
||||
// failed, we want to short circuit so as not to overwrite the AssertionResult
|
||||
// output. If that's the case, we do not run the current test and instead we
|
||||
// simply return.
|
||||
if (!reduced_res->has_value() || reduced_res->value()) {
|
||||
*reduced_res = TestSingleInvariantAtCountdownImpl(factory, operation, count,
|
||||
invariant);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Declare AbslCheckInvariants so that it can be found eventually via ADL.
|
||||
// Taking `...` gives it the lowest possible precedence.
|
||||
void AbslCheckInvariants(...);
|
||||
|
||||
// Returns an optional with the result of the check if op fails, or an empty
|
||||
// optional if op passes
|
||||
template <typename Factory, typename Op, typename... Checkers>
|
||||
absl::optional<testing::AssertionResult> TestAtCountdown(
|
||||
Factory factory, const Op& op, int count, const Checkers&... checkers) {
|
||||
// Don't bother with the checkers if the class invariants are already broken.
|
||||
auto out = TestCheckerAtCountdown(
|
||||
factory, op, count, [](FactoryType<Factory>* t_ptr) {
|
||||
return AbslCheckInvariants(t_ptr, InternalAbslNamespaceFinder());
|
||||
});
|
||||
if (!out.has_value()) return out;
|
||||
template <typename Factory, typename Operation, typename... Invariants>
|
||||
inline absl::optional<testing::AssertionResult> TestAllInvariantsAtCountdown(
|
||||
const Factory& factory, const Operation& operation, int count,
|
||||
const Invariants&... invariants) {
|
||||
absl::optional<testing::AssertionResult> reduced_res;
|
||||
|
||||
// Run each checker, short circuiting after the first failure
|
||||
int dummy[] = {0, (UpdateOut(factory, op, count, checkers, &*out))...};
|
||||
int dummy[] = {
|
||||
0, (TestSingleInvariantAtCountdown(factory, operation, count, invariants,
|
||||
&reduced_res))...};
|
||||
static_cast<void>(dummy);
|
||||
return out;
|
||||
return reduced_res;
|
||||
}
|
||||
|
||||
template <typename T, typename EqualTo>
|
||||
class StrongGuaranteeTester {
|
||||
public:
|
||||
explicit StrongGuaranteeTester(std::unique_ptr<T> t_ptr, EqualTo eq) noexcept
|
||||
: val_(std::move(t_ptr)), eq_(eq) {}
|
||||
|
||||
testing::AssertionResult operator()(T* other) const {
|
||||
return eq_(*val_, *other) ? testing::AssertionSuccess()
|
||||
: testing::AssertionFailure() << "State changed";
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<T> val_;
|
||||
EqualTo eq_;
|
||||
};
|
||||
} // namespace exceptions_internal
|
||||
|
||||
extern exceptions_internal::NoThrowTag no_throw_ctor;
|
||||
extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
|
||||
|
||||
// These are useful for tests which just construct objects and make sure there
|
||||
// are no leaks.
|
||||
|
@@ -208,7 +206,7 @@ inline void UnsetCountdown() { exceptions_internal::countdown = -1; }
|
|||
class ThrowingBool {
|
||||
public:
|
||||
ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
|
||||
operator bool() const { // NOLINT(runtime/explicit)
|
||||
operator bool() const { // NOLINT
|
||||
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
|
||||
return b_;
|
||||
}
|
||||
|
@@ -734,10 +732,9 @@ template <typename T, typename... Args>
|
|||
T TestThrowingCtor(Args&&... args) {
|
||||
struct Cleanup {
|
||||
~Cleanup() { UnsetCountdown(); }
|
||||
};
|
||||
Cleanup c;
|
||||
for (int countdown = 0;; ++countdown) {
|
||||
exceptions_internal::countdown = countdown;
|
||||
} c;
|
||||
for (int count = 0;; ++count) {
|
||||
exceptions_internal::countdown = count;
|
||||
try {
|
||||
return T(std::forward<Args>(args)...);
|
||||
} catch (const exceptions_internal::TestException&) {
|
||||
|
@@ -745,58 +742,237 @@ T TestThrowingCtor(Args&&... args) {
|
|||
}
|
||||
}
|
||||
|
||||
// Tests that performing operation Op on a T follows exception safety
|
||||
// guarantees. By default only tests the basic guarantee. There must be a
|
||||
// function, AbslCheckInvariants(T*, absl::InternalAbslNamespaceFinder) which
|
||||
// returns anything convertible to bool and which makes sure the invariants of
|
||||
// the type are upheld. This is called before any of the checkers. The
|
||||
// InternalAbslNamespaceFinder is unused, and just helps find
|
||||
// AbslCheckInvariants for absl types which become aliases to std::types in
|
||||
// C++17.
|
||||
//
|
||||
// Parameters:
|
||||
// * TFactory: operator() returns a unique_ptr to the type under test (T). It
|
||||
// should always return pointers to values which compare equal.
|
||||
// * FunctionFromTPtrToVoid: A functor exercising the function under test. It
|
||||
// should take a T* and return void.
|
||||
// * Checkers: Any number of functions taking a T* and returning
|
||||
// anything contextually convertible to bool. If a testing::AssertionResult
|
||||
// is used then the error message is kept. These test invariants related to
|
||||
// the operation. To test the strong guarantee, pass
|
||||
// absl::StrongGuarantee(factory). A checker may freely modify the passed-in
|
||||
// T, for example to make sure the T can be set to a known state.
|
||||
template <typename TFactory, typename FunctionFromTPtrToVoid,
|
||||
typename... Checkers>
|
||||
testing::AssertionResult TestExceptionSafety(TFactory factory,
|
||||
FunctionFromTPtrToVoid&& op,
|
||||
const Checkers&... checkers) {
|
||||
struct Cleanup {
|
||||
~Cleanup() { UnsetCountdown(); }
|
||||
} c;
|
||||
for (int countdown = 0;; ++countdown) {
|
||||
auto out = exceptions_internal::TestAtCountdown(factory, op, countdown,
|
||||
checkers...);
|
||||
if (!out.has_value()) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
if (!*out) return *out;
|
||||
}
|
||||
}
|
||||
namespace exceptions_internal {
|
||||
|
||||
// Returns a functor to test for the strong exception-safety guarantee.
|
||||
// Equality comparisons are made against the T provided by the factory and
|
||||
// default to using operator==.
|
||||
//
|
||||
// Parameters:
|
||||
// * TFactory: operator() returns a unique_ptr to the type under test. It
|
||||
// should always return pointers to values which compare equal.
|
||||
template <typename TFactory, typename EqualTo = std::equal_to<
|
||||
exceptions_internal::FactoryType<TFactory>>>
|
||||
exceptions_internal::StrongGuaranteeTester<
|
||||
exceptions_internal::FactoryType<TFactory>, EqualTo>
|
||||
StrongGuarantee(TFactory factory, EqualTo eq = EqualTo()) {
|
||||
return exceptions_internal::StrongGuaranteeTester<
|
||||
exceptions_internal::FactoryType<TFactory>, EqualTo>(factory(), eq);
|
||||
// Dummy struct for ExceptionSafetyTester<> partial state.
|
||||
struct UninitializedT {};
|
||||
|
||||
template <typename T>
|
||||
class DefaultFactory {
|
||||
public:
|
||||
explicit DefaultFactory(const T& t) : t_(t) {}
|
||||
std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
|
||||
|
||||
private:
|
||||
T t_;
|
||||
};
|
||||
|
||||
template <size_t LazyInvariantsCount, typename LazyFactory,
|
||||
typename LazyOperation>
|
||||
using EnableIfTestable = typename absl::enable_if_t<
|
||||
LazyInvariantsCount != 0 &&
|
||||
!std::is_same<LazyFactory, UninitializedT>::value &&
|
||||
!std::is_same<LazyOperation, UninitializedT>::value>;
|
||||
|
||||
template <typename Factory = UninitializedT,
|
||||
typename Operation = UninitializedT, typename... Invariants>
|
||||
class ExceptionSafetyTester;
|
||||
|
||||
} // namespace exceptions_internal
|
||||
|
||||
exceptions_internal::ExceptionSafetyTester<> MakeExceptionSafetyTester();
|
||||
|
||||
namespace exceptions_internal {
|
||||
|
||||
/*
|
||||
* Builds a tester object that tests if performing a operation on a T follows
|
||||
* exception safety guarantees. Verification is done via invariant assertion
|
||||
* callbacks applied to T instances post-throw.
|
||||
*
|
||||
* Template parameters for ExceptionSafetyTester:
|
||||
*
|
||||
* - Factory: The factory object (passed in via tester.WithFactory(...) or
|
||||
* tester.WithInitialValue(...)) must be invocable with the signature
|
||||
* `std::unique_ptr<T> operator()() const` where T is the type being tested.
|
||||
* It is used for reliably creating identical T instances to test on.
|
||||
*
|
||||
* - Operation: The operation object (passsed in via tester.WithOperation(...)
|
||||
* or tester.Test(...)) must be invocable with the signature
|
||||
* `void operator()(T*) const` where T is the type being tested. It is used
|
||||
* for performing steps on a T instance that may throw and that need to be
|
||||
* checked for exception safety. Each call to the operation will receive a
|
||||
* fresh T instance so it's free to modify and destroy the T instances as it
|
||||
* pleases.
|
||||
*
|
||||
* - Invariants...: The invariant assertion callback objects (passed in via
|
||||
* tester.WithInvariants(...)) must be invocable with the signature
|
||||
* `testing::AssertionResult operator()(T*) const` where T is the type being
|
||||
* tested. Invariant assertion callbacks are provided T instances post-throw.
|
||||
* They must return testing::AssertionSuccess when the type invariants of the
|
||||
* provided T instance hold. If the type invariants of the T instance do not
|
||||
* hold, they must return testing::AssertionFailure. Execution order of
|
||||
* Invariants... is unspecified. They will each individually get a fresh T
|
||||
* instance so they are free to modify and destroy the T instances as they
|
||||
* please.
|
||||
*/
|
||||
template <typename Factory, typename Operation, typename... Invariants>
|
||||
class ExceptionSafetyTester {
|
||||
public:
|
||||
/*
|
||||
* Returns a new ExceptionSafetyTester with an included T factory based on the
|
||||
* provided T instance. The existing factory will not be included in the newly
|
||||
* created tester instance. The created factory returns a new T instance by
|
||||
* copy-constructing the provided const T& t.
|
||||
*
|
||||
* Preconditions for tester.WithInitialValue(const T& t):
|
||||
*
|
||||
* - The const T& t object must be copy-constructible where T is the type
|
||||
* being tested. For non-copy-constructible objects, use the method
|
||||
* tester.WithFactory(...).
|
||||
*/
|
||||
template <typename T>
|
||||
ExceptionSafetyTester<DefaultFactory<T>, Operation, Invariants...>
|
||||
WithInitialValue(const T& t) const {
|
||||
return WithFactory(DefaultFactory<T>(t));
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a new ExceptionSafetyTester with the provided T factory included.
|
||||
* The existing factory will not be included in the newly-created tester
|
||||
* instance. This method is intended for use with types lacking a copy
|
||||
* constructor. Types that can be copy-constructed should instead use the
|
||||
* method tester.WithInitialValue(...).
|
||||
*/
|
||||
template <typename NewFactory>
|
||||
ExceptionSafetyTester<absl::decay_t<NewFactory>, Operation, Invariants...>
|
||||
WithFactory(const NewFactory& new_factory) const {
|
||||
return {new_factory, operation_, invariants_};
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a new ExceptionSafetyTester with the provided testable operation
|
||||
* included. The existing operation will not be included in the newly created
|
||||
* tester.
|
||||
*/
|
||||
template <typename NewOperation>
|
||||
ExceptionSafetyTester<Factory, absl::decay_t<NewOperation>, Invariants...>
|
||||
WithOperation(const NewOperation& new_operation) const {
|
||||
return {factory_, new_operation, invariants_};
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a new ExceptionSafetyTester with the provided MoreInvariants...
|
||||
* combined with the Invariants... that were already included in the instance
|
||||
* on which the method was called. Invariants... cannot be removed or replaced
|
||||
* once added to an ExceptionSafetyTester instance. A fresh object must be
|
||||
* created in order to get an empty Invariants... list.
|
||||
*
|
||||
* In addition to passing in custom invariant assertion callbacks, this method
|
||||
* accepts `absl::strong_guarantee` as an argument which checks T instances
|
||||
* post-throw against freshly created T instances via operator== to verify
|
||||
* that any state changes made during the execution of the operation were
|
||||
* properly rolled back.
|
||||
*/
|
||||
template <typename... MoreInvariants>
|
||||
ExceptionSafetyTester<Factory, Operation, Invariants...,
|
||||
absl::decay_t<MoreInvariants>...>
|
||||
WithInvariants(const MoreInvariants&... more_invariants) const {
|
||||
return {factory_, operation_,
|
||||
std::tuple_cat(invariants_,
|
||||
std::tuple<absl::decay_t<MoreInvariants>...>(
|
||||
more_invariants...))};
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a testing::AssertionResult that is the reduced result of the
|
||||
* exception safety algorithm. The algorithm short circuits and returns
|
||||
* AssertionFailure after the first invariant callback returns an
|
||||
* AssertionFailure. Otherwise, if all invariant callbacks return an
|
||||
* AssertionSuccess, the reduced result is AssertionSuccess.
|
||||
*
|
||||
* The passed-in testable operation will not be saved in a new tester instance
|
||||
* nor will it modify/replace the existing tester instance. This is useful
|
||||
* when each operation being tested is unique and does not need to be reused.
|
||||
*
|
||||
* Preconditions for tester.Test(const NewOperation& new_operation):
|
||||
*
|
||||
* - May only be called after at least one invariant assertion callback and a
|
||||
* factory or initial value have been provided.
|
||||
*/
|
||||
template <
|
||||
typename NewOperation,
|
||||
typename = EnableIfTestable<sizeof...(Invariants), Factory, NewOperation>>
|
||||
testing::AssertionResult Test(const NewOperation& new_operation) const {
|
||||
return TestImpl(new_operation, absl::index_sequence_for<Invariants...>());
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns a testing::AssertionResult that is the reduced result of the
|
||||
* exception safety algorithm. The algorithm short circuits and returns
|
||||
* AssertionFailure after the first invariant callback returns an
|
||||
* AssertionFailure. Otherwise, if all invariant callbacks return an
|
||||
* AssertionSuccess, the reduced result is AssertionSuccess.
|
||||
*
|
||||
* Preconditions for tester.Test():
|
||||
*
|
||||
* - May only be called after at least one invariant assertion callback, a
|
||||
* factory or initial value and a testable operation have been provided.
|
||||
*/
|
||||
template <typename LazyOperation = Operation,
|
||||
typename =
|
||||
EnableIfTestable<sizeof...(Invariants), Factory, LazyOperation>>
|
||||
testing::AssertionResult Test() const {
|
||||
return TestImpl(operation_, absl::index_sequence_for<Invariants...>());
|
||||
}
|
||||
|
||||
private:
|
||||
template <typename, typename, typename...>
|
||||
friend class ExceptionSafetyTester;
|
||||
|
||||
friend ExceptionSafetyTester<> absl::MakeExceptionSafetyTester();
|
||||
|
||||
ExceptionSafetyTester() {}
|
||||
|
||||
ExceptionSafetyTester(const Factory& f, const Operation& o,
|
||||
const std::tuple<Invariants...>& i)
|
||||
: factory_(f), operation_(o), invariants_(i) {}
|
||||
|
||||
template <typename SelectedOperation, size_t... Indices>
|
||||
testing::AssertionResult TestImpl(const SelectedOperation& selected_operation,
|
||||
absl::index_sequence<Indices...>) const {
|
||||
// Starting from 0 and counting upwards until one of the exit conditions is
|
||||
// hit...
|
||||
for (int count = 0;; ++count) {
|
||||
// Run the full exception safety test algorithm for the current countdown
|
||||
auto reduced_res =
|
||||
TestAllInvariantsAtCountdown(factory_, selected_operation, count,
|
||||
std::get<Indices>(invariants_)...);
|
||||
// If there is no value in the optional, no invariants were run because no
|
||||
// exception was thrown. This means that the test is complete and the loop
|
||||
// can exit successfully.
|
||||
if (!reduced_res.has_value()) {
|
||||
return testing::AssertionSuccess();
|
||||
}
|
||||
// If the optional is not empty and the value is falsy, an invariant check
|
||||
// failed so the test must exit to propegate the failure.
|
||||
if (!reduced_res.value()) {
|
||||
return reduced_res.value();
|
||||
}
|
||||
// If the optional is not empty and the value is not falsy, it means
|
||||
// exceptions were thrown but the invariants passed so the test must
|
||||
// continue to run.
|
||||
}
|
||||
}
|
||||
|
||||
Factory factory_;
|
||||
Operation operation_;
|
||||
std::tuple<Invariants...> invariants_;
|
||||
};
|
||||
|
||||
} // namespace exceptions_internal
|
||||
|
||||
/*
|
||||
* Constructs an empty ExceptionSafetyTester. All ExceptionSafetyTester
|
||||
* objects are immutable and all With[thing] mutation methods return new
|
||||
* instances of ExceptionSafetyTester.
|
||||
*
|
||||
* In order to test a T for exception safety, a factory for that T, a testable
|
||||
* operation, and at least one invariant callback returning an assertion
|
||||
* result must be applied using the respective methods.
|
||||
*/
|
||||
inline exceptions_internal::ExceptionSafetyTester<>
|
||||
MakeExceptionSafetyTester() {
|
||||
return {};
|
||||
}
|
||||
|
||||
} // namespace absl
@@ -23,6 +23,7 @@
|
|||
|
||||
#include "absl/base/call_once.h"
|
||||
#include "absl/base/config.h"
|
||||
#include "absl/base/internal/direct_mmap.h"
|
||||
#include "absl/base/internal/scheduling_mode.h"
|
||||
#include "absl/base/macros.h"
|
||||
#include "absl/base/thread_annotations.h"
|
||||
|
@@ -49,8 +50,6 @@
|
|||
#include <new> // for placement-new
|
||||
|
||||
#include "absl/base/dynamic_annotations.h"
|
||||
#include "absl/base/internal/malloc_hook.h"
|
||||
#include "absl/base/internal/malloc_hook_invoke.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
#include "absl/base/internal/spinlock.h"
|
||||
|
||||
|
@@ -405,7 +404,7 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
|
|||
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
|
||||
munmap_result = munmap(region, size);
|
||||
} else {
|
||||
munmap_result = MallocHook::UnhookedMUnmap(region, size);
|
||||
munmap_result = base_internal::DirectMunmap(region, size);
|
||||
}
|
||||
if (munmap_result != 0) {
|
||||
ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
|
||||
|
@@ -503,9 +502,6 @@ void LowLevelAlloc::Free(void *v) {
|
|||
ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
|
||||
"bad magic number in Free()");
|
||||
LowLevelAlloc::Arena *arena = f->header.arena;
|
||||
if ((arena->flags & kCallMallocHook) != 0) {
|
||||
MallocHook::InvokeDeleteHook(v);
|
||||
}
|
||||
ArenaLock section(arena);
|
||||
AddToFreelist(v, arena);
|
||||
ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
|
||||
|
@@ -550,7 +546,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
|
|||
ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
|
||||
#else
|
||||
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
|
||||
new_pages = MallocHook::UnhookedMMap(nullptr, new_pages_size,
|
||||
new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
|
||||
PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
|
||||
} else {
|
||||
new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
|
||||
|
@@ -593,21 +589,12 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
|
|||
|
||||
void *LowLevelAlloc::Alloc(size_t request) {
|
||||
void *result = DoAllocWithArena(request, DefaultArena());
|
||||
// The default arena always calls the malloc hook.
|
||||
// This call must be directly in the user-called allocator function
|
||||
// for MallocHook::GetCallerStackTrace to work properly
|
||||
MallocHook::InvokeNewHook(result, request);
|
||||
return result;
|
||||
}
|
||||
|
||||
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
|
||||
ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
|
||||
void *result = DoAllocWithArena(request, arena);
|
||||
if ((arena->flags & kCallMallocHook) != 0) {
|
||||
// this call must be directly in the user-called allocator function
|
||||
// for MallocHook::GetCallerStackTrace to work properly
|
||||
MallocHook::InvokeNewHook(result, request);
|
||||
}
|
||||
return result;
|
||||
}
@@ -21,8 +21,6 @@
|
|||
#include <unordered_map>
|
||||
#include <utility>
|
||||
|
||||
#include "absl/base/internal/malloc_hook.h"
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
namespace {
|
||||
|
@@ -139,58 +137,12 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
|
|||
TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
|
||||
}
|
||||
}
|
||||
|
||||
// used for counting allocates and frees
|
||||
static int32_t allocates;
|
||||
static int32_t frees;
|
||||
|
||||
// ignore uses of the allocator not triggered by our test
|
||||
static std::thread::id* test_tid;
|
||||
|
||||
// called on each alloc if kCallMallocHook specified
|
||||
static void AllocHook(const void *p, size_t size) {
|
||||
if (using_low_level_alloc) {
|
||||
if (*test_tid == std::this_thread::get_id()) {
|
||||
allocates++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// called on each free if kCallMallocHook specified
|
||||
static void FreeHook(const void *p) {
|
||||
if (using_low_level_alloc) {
|
||||
if (*test_tid == std::this_thread::get_id()) {
|
||||
frees++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LowLevelAlloc is designed to be safe to call before main().
|
||||
static struct BeforeMain {
|
||||
BeforeMain() {
|
||||
test_tid = new std::thread::id(std::this_thread::get_id());
|
||||
TEST_ASSERT(MallocHook::AddNewHook(&AllocHook));
|
||||
TEST_ASSERT(MallocHook::AddDeleteHook(&FreeHook));
|
||||
TEST_ASSERT(allocates == 0);
|
||||
TEST_ASSERT(frees == 0);
|
||||
Test(false, false, 50000);
|
||||
TEST_ASSERT(allocates != 0); // default arena calls hooks
|
||||
TEST_ASSERT(frees != 0);
|
||||
for (int i = 0; i != 16; i++) {
|
||||
bool call_hooks = ((i & 1) == 1);
|
||||
allocates = 0;
|
||||
frees = 0;
|
||||
Test(true, call_hooks, 15000);
|
||||
if (call_hooks) {
|
||||
TEST_ASSERT(allocates > 5000); // arena calls hooks
|
||||
TEST_ASSERT(frees > 5000);
|
||||
} else {
|
||||
TEST_ASSERT(allocates == 0); // arena doesn't call hooks
|
||||
TEST_ASSERT(frees == 0);
|
||||
}
|
||||
}
|
||||
TEST_ASSERT(MallocHook::RemoveNewHook(&AllocHook));
|
||||
TEST_ASSERT(MallocHook::RemoveDeleteHook(&FreeHook));
|
||||
Test(true, false, 50000);
|
||||
Test(true, true, 50000);
|
||||
}
|
||||
} before_main;
@@ -1,162 +0,0 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/base/internal/malloc_extension.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <atomic>
|
||||
#include <string>
|
||||
|
||||
#include "absl/base/dynamic_annotations.h"
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
// SysAllocator implementation
|
||||
SysAllocator::~SysAllocator() {}
|
||||
void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
|
||||
|
||||
// Dummy key method to avoid weak vtable.
|
||||
void MallocExtensionWriter::UnusedKeyMethod() {}
|
||||
|
||||
void StringMallocExtensionWriter::Write(const char* buf, int len) {
|
||||
out_->append(buf, len);
|
||||
}
|
||||
|
||||
// Default implementation -- does nothing
|
||||
MallocExtension::~MallocExtension() { }
|
||||
bool MallocExtension::VerifyAllMemory() { return true; }
|
||||
bool MallocExtension::VerifyNewMemory(const void*) { return true; }
|
||||
bool MallocExtension::VerifyArrayNewMemory(const void*) { return true; }
|
||||
bool MallocExtension::VerifyMallocMemory(const void*) { return true; }
|
||||
|
||||
bool MallocExtension::GetNumericProperty(const char*, size_t*) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool MallocExtension::SetNumericProperty(const char*, size_t) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void MallocExtension::GetStats(char* buffer, int length) {
|
||||
assert(length > 0);
|
||||
static_cast<void>(length);
|
||||
buffer[0] = '\0';
|
||||
}
|
||||
|
||||
bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
|
||||
int histogram[kMallocHistogramSize]) {
|
||||
*blocks = 0;
|
||||
*total = 0;
|
||||
memset(histogram, 0, sizeof(*histogram) * kMallocHistogramSize);
|
||||
return true;
|
||||
}
|
||||
|
||||
void MallocExtension::MarkThreadIdle() {
|
||||
// Default implementation does nothing
|
||||
}
|
||||
|
||||
void MallocExtension::MarkThreadBusy() {
|
||||
// Default implementation does nothing
|
||||
}
|
||||
|
||||
SysAllocator* MallocExtension::GetSystemAllocator() {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void MallocExtension::SetSystemAllocator(SysAllocator*) {
|
||||
// Default implementation does nothing
|
||||
}
|
||||
|
||||
void MallocExtension::ReleaseToSystem(size_t) {
|
||||
// Default implementation does nothing
|
||||
}
|
||||
|
||||
void MallocExtension::ReleaseFreeMemory() {
|
||||
ReleaseToSystem(static_cast<size_t>(-1)); // SIZE_T_MAX
|
||||
}
|
||||
|
||||
void MallocExtension::SetMemoryReleaseRate(double) {
|
||||
// Default implementation does nothing
|
||||
}
|
||||
|
||||
double MallocExtension::GetMemoryReleaseRate() {
|
||||
return -1.0;
|
||||
}
|
||||
|
||||
size_t MallocExtension::GetEstimatedAllocatedSize(size_t size) {
|
||||
return size;
|
||||
}
|
||||
|
||||
size_t MallocExtension::GetAllocatedSize(const void* p) {
|
||||
assert(GetOwnership(p) != kNotOwned);
|
||||
static_cast<void>(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
MallocExtension::Ownership MallocExtension::GetOwnership(const void*) {
|
||||
return kUnknownOwnership;
|
||||
}
|
||||
|
||||
void MallocExtension::GetProperties(MallocExtension::StatLevel,
|
||||
std::map<std::string, Property>* result) {
|
||||
result->clear();
|
||||
}
|
||||
|
||||
size_t MallocExtension::ReleaseCPUMemory(int) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// The current malloc extension object.
|
||||
|
||||
std::atomic<MallocExtension*> MallocExtension::current_instance_;
|
||||
|
||||
MallocExtension* MallocExtension::InitModule() {
|
||||
MallocExtension* ext = new MallocExtension;
|
||||
current_instance_.store(ext, std::memory_order_release);
|
||||
return ext;
|
||||
}
|
||||
|
||||
void MallocExtension::Register(MallocExtension* implementation) {
|
||||
InitModuleOnce();
|
||||
// When running under valgrind, our custom malloc is replaced with
|
||||
// valgrind's one and malloc extensions will not work. (Note:
|
||||
// callers should be responsible for checking that they are the
|
||||
// malloc that is really being run, before calling Register. This
|
||||
// is just here as an extra sanity check.)
|
||||
// Under compiler-based ThreadSanitizer RunningOnValgrind() returns true,
|
||||
// but we still want to use malloc extensions.
|
||||
#ifndef THREAD_SANITIZER
|
||||
if (RunningOnValgrind()) {
|
||||
return;
|
||||
}
|
||||
#endif // #ifndef THREAD_SANITIZER
|
||||
current_instance_.store(implementation, std::memory_order_release);
|
||||
}
|
||||
void MallocExtension::GetHeapSample(MallocExtensionWriter*) {}
|
||||
|
||||
void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter*) {}
|
||||
|
||||
void MallocExtension::GetFragmentationProfile(MallocExtensionWriter*) {}
|
||||
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
// Default implementation just returns size. The expectation is that
|
||||
// the linked-in malloc implementation might provide an override of
|
||||
// this weak function with a better implementation.
|
||||
ABSL_ATTRIBUTE_WEAK ABSL_ATTRIBUTE_NOINLINE size_t nallocx(size_t size, int) {
|
||||
return size;
|
||||
}
@@ -1,426 +0,0 @@
|
|||
//
|
||||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
// Extra extensions exported by some malloc implementations. These
|
||||
// extensions are accessed through a virtual base class so an
|
||||
// application can link against a malloc that does not implement these
|
||||
// extensions, and it will get default versions that do nothing.
|
||||
//
|
||||
// NOTE FOR C USERS: If you wish to use this functionality from within
|
||||
// a C program, see malloc_extension_c.h.
|
||||
|
||||
#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
|
||||
#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <atomic>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/base/macros.h"
|
||||
#include "absl/base/port.h"
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
class MallocExtensionWriter;
|
||||
|
||||
// Interface to a pluggable system allocator.
class SysAllocator {
 public:
  SysAllocator() {
  }
  virtual ~SysAllocator();

  // Allocates "size" bytes of memory from the system, aligned to "alignment".
  // Returns null on failure.  Otherwise, the bytes from the returned pointer p
  // up to and including (p + *actual_size - 1) have been allocated.
  virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;

  // Get a human-readable description of the current state of the
  // allocator.  The state is stored as a null-terminated std::string in
  // a prefix of buffer.
  virtual void GetStats(char* buffer, int length);
};
|
||||
|
||||
// The default implementations of the following routines do nothing.
|
||||
// All implementations should be thread-safe; the current ones
|
||||
// (DebugMallocImplementation and TCMallocImplementation) are.
|
||||
class MallocExtension {
|
||||
public:
|
||||
virtual ~MallocExtension();
|
||||
|
||||
// Verifies that all blocks are valid. Returns true if all are; dumps
|
||||
// core otherwise. A no-op except in debug mode. Even in debug mode,
|
||||
// they may not do any checking except with certain malloc
|
||||
// implementations. Thread-safe.
|
||||
virtual bool VerifyAllMemory();
|
||||
|
||||
// Verifies that p was returned by new, has not been deleted, and is
|
||||
// valid. Returns true if p is good; dumps core otherwise. A no-op
|
||||
// except in debug mode. Even in debug mode, may not do any checking
|
||||
// except with certain malloc implementations. Thread-safe.
|
||||
virtual bool VerifyNewMemory(const void* p);
|
||||
|
||||
// Verifies that p was returned by new[], has not been deleted, and is
|
||||
// valid. Returns true if p is good; dumps core otherwise. A no-op
|
||||
// except in debug mode. Even in debug mode, may not do any checking
|
||||
// except with certain malloc implementations. Thread-safe.
|
||||
virtual bool VerifyArrayNewMemory(const void* p);
|
||||
|
||||
// Verifies that p was returned by malloc, has not been freed, and is
|
||||
// valid. Returns true if p is good; dumps core otherwise. A no-op
|
||||
// except in debug mode. Even in debug mode, may not do any checking
|
||||
// except with certain malloc implementations. Thread-safe.
|
||||
virtual bool VerifyMallocMemory(const void* p);
|
||||
|
||||
// If statistics collection is enabled, sets *blocks to be the number of
|
||||
// currently allocated blocks, sets *total to be the total size allocated
|
||||
// over all blocks, sets histogram[n] to be the number of blocks with
|
||||
// size between 2^n-1 and 2^(n+1), and returns true. Returns false, and
|
||||
// does not change *blocks, *total, or *histogram, if statistics
|
||||
// collection is disabled.
|
||||
//
|
||||
// Note that these statistics reflect memory allocated by new, new[],
|
||||
// malloc(), and realloc(), but not mmap(). They may be larger (if not
|
||||
// all pages have been written to) or smaller (if pages have been
|
||||
// allocated by mmap()) than the total RSS size. They will always be
|
||||
// smaller than the total virtual memory size.
|
||||
static constexpr int kMallocHistogramSize = 64;
|
||||
virtual bool MallocMemoryStats(int* blocks, size_t* total,
|
||||
int histogram[kMallocHistogramSize]);
|
||||
|
||||
// Get a human readable description of the current state of the malloc
|
||||
// data structures. The state is stored as a null-terminated std::string
|
||||
// in a prefix of "buffer[0,buffer_length-1]".
|
||||
// REQUIRES: buffer_length > 0.
|
||||
virtual void GetStats(char* buffer, int buffer_length);
|
||||
|
||||
// Outputs to "writer" a sample of live objects and the stack traces
|
||||
// that allocated these objects. The output can be passed to pprof.
|
||||
virtual void GetHeapSample(MallocExtensionWriter* writer);
|
||||
|
||||
// Outputs to "writer" the stack traces that caused growth in the
|
||||
// address space size. The output can be passed to "pprof".
|
||||
virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer);
|
||||
|
||||
// Outputs to "writer" a fragmentation profile. The output can be
|
||||
// passed to "pprof". In particular, the result is a list of
|
||||
// <n,total,stacktrace> tuples that says that "total" bytes in "n"
|
||||
// objects are currently unusable because of fragmentation caused by
|
||||
// an allocation with the specified "stacktrace".
|
||||
virtual void GetFragmentationProfile(MallocExtensionWriter* writer);
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Control operations for getting and setting malloc implementation
|
||||
// specific parameters. Some currently useful properties:
|
||||
//
|
||||
// generic
|
||||
// -------
|
||||
// "generic.current_allocated_bytes"
|
||||
// Number of bytes currently allocated by application
|
||||
// This property is not writable.
|
||||
//
|
||||
// "generic.heap_size"
|
||||
// Number of bytes in the heap ==
|
||||
// current_allocated_bytes +
|
||||
// fragmentation +
|
||||
// freed memory regions
|
||||
// This property is not writable.
|
||||
//
|
||||
// tcmalloc
|
||||
// --------
|
||||
// "tcmalloc.max_total_thread_cache_bytes"
|
||||
// Upper limit on total number of bytes stored across all
|
||||
// per-thread caches. Default: 16MB.
|
||||
//
|
||||
// "tcmalloc.current_total_thread_cache_bytes"
|
||||
// Number of bytes used across all thread caches.
|
||||
// This property is not writable.
|
||||
//
|
||||
// "tcmalloc.pageheap_free_bytes"
|
||||
// Number of bytes in free, mapped pages in page heap. These
|
||||
// bytes can be used to fulfill allocation requests. They
|
||||
// always count towards virtual memory usage, and unless the
|
||||
// underlying memory is swapped out by the OS, they also count
|
||||
// towards physical memory usage. This property is not writable.
|
||||
//
|
||||
// "tcmalloc.pageheap_unmapped_bytes"
|
||||
// Number of bytes in free, unmapped pages in page heap.
|
||||
// These are bytes that have been released back to the OS,
|
||||
// possibly by one of the MallocExtension "Release" calls.
|
||||
// They can be used to fulfill allocation requests, but
|
||||
// typically incur a page fault. They always count towards
|
||||
// virtual memory usage, and depending on the OS, typically
|
||||
// do not count towards physical memory usage. This property
|
||||
// is not writable.
|
||||
//
|
||||
// "tcmalloc.per_cpu_caches_active"
|
||||
// Whether tcmalloc is using per-CPU caches (1 or 0 respectively).
|
||||
// This property is not writable.
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Get the named "property"'s value. Returns true if the property
|
||||
// is known. Returns false if the property is not a valid property
|
||||
// name for the current malloc implementation.
|
||||
// REQUIRES: property != null; value != null
|
||||
virtual bool GetNumericProperty(const char* property, size_t* value);
|
||||
|
||||
// Set the named "property"'s value. Returns true if the property
|
||||
// is known and writable. Returns false if the property is not a
|
||||
// valid property name for the current malloc implementation, or
|
||||
// is not writable.
|
||||
// REQUIRES: property != null
|
||||
virtual bool SetNumericProperty(const char* property, size_t value);
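// A minimal usage sketch (illustrative only), assuming a linked-in malloc
// that implements these properties (e.g. tcmalloc):
//
//   size_t in_use = 0;
//   if (MallocExtension::instance()->GetNumericProperty(
//           "generic.current_allocated_bytes", &in_use)) {
//     // in_use now holds the bytes currently allocated by the application.
//   }
//   MallocExtension::instance()->SetNumericProperty(
//       "tcmalloc.max_total_thread_cache_bytes", 32 << 20);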
|
||||
|
||||
// Mark the current thread as "idle". This routine may optionally
|
||||
// be called by threads as a hint to the malloc implementation that
|
||||
// any thread-specific resources should be released. Note: this may
|
||||
// be an expensive routine, so it should not be called too often.
|
||||
//
|
||||
// Also, if the code that calls this routine will go to sleep for
|
||||
// a while, it should take care to not allocate anything between
|
||||
// the call to this routine and the beginning of the sleep.
|
||||
//
|
||||
// Most malloc implementations ignore this routine.
|
||||
virtual void MarkThreadIdle();
|
||||
|
||||
// Mark the current thread as "busy". This routine should be
|
||||
// called after MarkThreadIdle() if the thread will now do more
|
||||
// work. If this method is not called, performance may suffer.
|
||||
//
|
||||
// Most malloc implementations ignore this routine.
|
||||
virtual void MarkThreadBusy();
|
||||
|
||||
// Attempt to free any resources associated with cpu <cpu> (in the sense
|
||||
// of only being usable from that CPU.) Returns the number of bytes
|
||||
// previously assigned to "cpu" that were freed. Safe to call from
|
||||
// any processor, not just <cpu>.
|
||||
//
|
||||
// Most malloc implementations ignore this routine (known exceptions:
|
||||
// tcmalloc with --tcmalloc_per_cpu_caches=true.)
|
||||
virtual size_t ReleaseCPUMemory(int cpu);
|
||||
|
||||
// Gets the system allocator used by the malloc extension instance. Returns
|
||||
// null for malloc implementations that do not support pluggable system
|
||||
// allocators.
|
||||
virtual SysAllocator* GetSystemAllocator();
|
||||
|
||||
// Sets the system allocator to the specified one.
//
// Users can register their own system allocator for a malloc implementation
// that supports pluggable system allocators, such as TCMalloc, by doing:
//   alloc = new MyOwnSysAllocator();
//   MallocExtension::instance()->SetSystemAllocator(alloc);
// It's up to users whether to fall back (recommended) to the default
// system allocator (use GetSystemAllocator() above) or not.  The caller is
// responsible for any necessary locking.
// See tcmalloc/system-alloc.h for the interface and
// tcmalloc/memfs_malloc.cc for an example.
//
// This is a no-op for malloc implementations that do not support pluggable
// system allocators.
virtual void SetSystemAllocator(SysAllocator* a);
|
||||
|
||||
// Try to release num_bytes of free memory back to the operating
|
||||
// system for reuse. Use this extension with caution -- to get this
|
||||
// memory back may require faulting pages back in by the OS, and
|
||||
// that may be slow. (Currently only implemented in tcmalloc.)
|
||||
virtual void ReleaseToSystem(size_t num_bytes);
|
||||
|
||||
// Same as ReleaseToSystem() but release as much memory as possible.
|
||||
virtual void ReleaseFreeMemory();
|
||||
|
||||
// Sets the rate at which we release unused memory to the system.
|
||||
// Zero means we never release memory back to the system. Increase
|
||||
// this flag to return memory faster; decrease it to return memory
|
||||
// slower. Reasonable rates are in the range [0,10]. (Currently
|
||||
// only implemented in tcmalloc).
|
||||
virtual void SetMemoryReleaseRate(double rate);
|
||||
|
||||
// Gets the release rate. Returns a value < 0 if unknown.
|
||||
virtual double GetMemoryReleaseRate();
|
||||
|
||||
// Returns the estimated number of bytes that will be allocated for
|
||||
// a request of "size" bytes. This is an estimate: an allocation of
|
||||
// SIZE bytes may reserve more bytes, but will never reserve less.
|
||||
// (Currently only implemented in tcmalloc, other implementations
|
||||
// always return SIZE.)
|
||||
// This is equivalent to malloc_good_size() in OS X.
|
||||
virtual size_t GetEstimatedAllocatedSize(size_t size);
|
||||
|
||||
// Returns the actual number N of bytes reserved by tcmalloc for the
|
||||
// pointer p. This number may be equal to or greater than the
|
||||
// number of bytes requested when p was allocated.
|
||||
//
|
||||
// This routine is just useful for statistics collection. The
|
||||
// client must *not* read or write from the extra bytes that are
|
||||
// indicated by this call.
|
||||
//
|
||||
// Example, suppose the client gets memory by calling
|
||||
// p = malloc(10)
|
||||
// and GetAllocatedSize(p) returns 16. The client must only use the
|
||||
// first 10 bytes p[0..9], and not attempt to read or write p[10..15].
|
||||
//
|
||||
// p must have been allocated by this malloc implementation, must
|
||||
// not be an interior pointer -- that is, must be exactly the
|
||||
// pointer returned to by malloc() et al., not some offset from that
|
||||
// -- and should not have been freed yet. p may be null.
|
||||
// (Currently only implemented in tcmalloc; other implementations
|
||||
// will return 0.)
|
||||
virtual size_t GetAllocatedSize(const void* p);
|
||||
|
||||
// Returns kOwned if this malloc implementation allocated the memory
|
||||
// pointed to by p, or kNotOwned if some other malloc implementation
|
||||
// allocated it or p is null. May also return kUnknownOwnership if
|
||||
// the malloc implementation does not keep track of ownership.
|
||||
// REQUIRES: p must be a value returned from a previous call to
|
||||
// malloc(), calloc(), realloc(), memalign(), posix_memalign(),
|
||||
// valloc(), pvalloc(), new, or new[], and must refer to memory that
|
||||
// is currently allocated (so, for instance, you should not pass in
|
||||
// a pointer after having called free() on it).
|
||||
enum Ownership {
|
||||
// NOTE: Enum values MUST be kept in sync with the version in
|
||||
// malloc_extension_c.h
|
||||
kUnknownOwnership = 0,
|
||||
kOwned,
|
||||
kNotOwned
|
||||
};
|
||||
virtual Ownership GetOwnership(const void* p);
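// A minimal usage sketch (illustrative only):
//
//   void* p = malloc(64);
//   if (MallocExtension::instance()->GetOwnership(p) ==
//       MallocExtension::kOwned) {
//     size_t actual = MallocExtension::instance()->GetAllocatedSize(p);
//     // Only the first 64 bytes of p may be used, even if actual > 64.
//   }
//   free(p);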
|
||||
|
||||
// The current malloc implementation. Always non-null.
|
||||
static MallocExtension* instance() {
|
||||
InitModuleOnce();
|
||||
return current_instance_.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
// Change the malloc implementation. Typically called by the
|
||||
// malloc implementation during initialization.
|
||||
static void Register(MallocExtension* implementation);
|
||||
|
||||
// Type used by GetProperties. See comment on GetProperties.
|
||||
struct Property {
|
||||
size_t value;
|
||||
// Stores breakdown of the property value bucketed by object size.
|
||||
struct Bucket {
|
||||
size_t min_object_size;
|
||||
size_t max_object_size;
|
||||
size_t size;
|
||||
};
|
||||
// Empty unless detailed info was asked for and this type has buckets
|
||||
std::vector<Bucket> buckets;
|
||||
};
|
||||
|
||||
// Type used by GetProperties. See comment on GetProperties.
|
||||
enum StatLevel { kSummary, kDetailed };
|
||||
|
||||
// Stores in *result detailed statistics about the malloc
|
||||
// implementation. *result will be a map keyed by the name of
|
||||
// the statistic. Each statistic has at least a "value" field.
|
||||
//
|
||||
// Some statistics may also contain an array of buckets if
|
||||
// level==kDetailed and the "value" can be subdivided
|
||||
// into different buckets for different object sizes. If
|
||||
// such detailed statistics are not available, Property::buckets
|
||||
// will be empty. Otherwise Property::buckets will contain
|
||||
// potentially many entries. For each bucket b, b.value
|
||||
// will count the value contributed by objects in the range
|
||||
// [b.min_object_size, b.max_object_size].
|
||||
//
|
||||
// Common across malloc implementations:
|
||||
// generic.bytes_in_use_by_app -- Bytes currently in use by application
|
||||
// generic.physical_memory_used -- Overall (including malloc internals)
|
||||
// generic.virtual_memory_used -- Overall (including malloc internals)
|
||||
//
|
||||
// Tcmalloc specific properties
|
||||
// tcmalloc.cpu_free -- Bytes in per-cpu free-lists
|
||||
// tcmalloc.thread_cache_free -- Bytes in per-thread free-lists
|
||||
// tcmalloc.transfer_cache -- Bytes in cross-thread transfer caches
|
||||
// tcmalloc.central_cache_free -- Bytes in central cache
|
||||
// tcmalloc.page_heap_free -- Bytes in page heap
|
||||
// tcmalloc.page_heap_unmapped -- Bytes in page heap (no backing phys. mem)
|
||||
// tcmalloc.metadata_bytes -- Used by internal data structures
|
||||
// tcmalloc.thread_cache_count -- Number of thread caches in use
|
||||
//
|
||||
// Debug allocator
|
||||
// debug.free_queue -- Recently freed objects
|
||||
virtual void GetProperties(StatLevel level,
|
||||
std::map<std::string, Property>* result);
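// A minimal usage sketch (illustrative only):
//
//   std::map<std::string, MallocExtension::Property> props;
//   MallocExtension::instance()->GetProperties(MallocExtension::kDetailed,
//                                              &props);
//   for (const auto& p : props) {
//     printf("%s = %zu\n", p.first.c_str(), p.second.value);
//   }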
|
||||
private:
|
||||
static MallocExtension* InitModule();
|
||||
|
||||
static void InitModuleOnce() {
|
||||
// Pointer stored here so heap leak checker will consider the default
|
||||
// instance reachable, even if current_instance_ is later overridden by
|
||||
// MallocExtension::Register().
|
||||
ABSL_ATTRIBUTE_UNUSED static MallocExtension* default_instance =
|
||||
InitModule();
|
||||
}
|
||||
|
||||
static std::atomic<MallocExtension*> current_instance_;
|
||||
};
|
||||
|
||||
// Base class than can handle output generated by GetHeapSample() and
|
||||
// GetHeapGrowthStacks(). Use the available subclass or roll your
|
||||
// own. Useful if you want explicit control over the type of output
|
||||
// buffer used (e.g. IOBuffer, Cord, etc.)
|
||||
class MallocExtensionWriter {
|
||||
public:
|
||||
virtual ~MallocExtensionWriter() {}
|
||||
virtual void Write(const char* buf, int len) = 0;
|
||||
protected:
|
||||
MallocExtensionWriter() {}
|
||||
MallocExtensionWriter(const MallocExtensionWriter&) = delete;
|
||||
MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
|
||||
|
||||
private:
|
||||
virtual void UnusedKeyMethod(); // Dummy key method to avoid weak vtable.
|
||||
};
|
||||
|
||||
// A subclass that writes to the std::string "out". NOTE: The generated
|
||||
// data is *appended* to "*out". I.e., the old contents of "*out" are
|
||||
// preserved.
|
||||
class StringMallocExtensionWriter : public MallocExtensionWriter {
|
||||
public:
|
||||
explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
|
||||
void Write(const char* buf, int len) override;
|
||||
|
||||
private:
|
||||
std::string* const out_;
|
||||
StringMallocExtensionWriter(const StringMallocExtensionWriter&) = delete;
|
||||
StringMallocExtensionWriter& operator=(const StringMallocExtensionWriter&) =
|
||||
delete;
|
||||
};
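// A minimal usage sketch (illustrative only): collecting a heap sample that
// can be written out and passed to pprof:
//
//   std::string profile;
//   StringMallocExtensionWriter writer(&profile);
//   MallocExtension::instance()->GetHeapSample(&writer);
//   // `profile` now holds the sample; write it to a file for pprof.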
|
||||
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
// The nallocx function allocates no memory, but it performs the same size
|
||||
// computation as the malloc function, and returns the real size of the
|
||||
// allocation that would result from the equivalent malloc function call.
|
||||
// Default weak implementation returns size unchanged, but tcmalloc overrides it
|
||||
// and returns rounded up size. See the following link for details:
|
||||
// http://www.unix.com/man-page/freebsd/3/nallocx/
|
||||
extern "C" size_t nallocx(size_t size, int flags);
|
||||
|
||||
#ifndef MALLOCX_LG_ALIGN
|
||||
#define MALLOCX_LG_ALIGN(la) (la)
|
||||
#endif
|
||||
|
||||
#endif // ABSL_BASE_INTERNAL_MALLOC_EXTENSION_H_
@@ -1,75 +0,0 @@
/*
|
||||
* Copyright 2017 The Abseil Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
|
||||
* C shims for the C++ malloc_extension.h. See malloc_extension.h for
|
||||
* details. Note these C shims always work on
|
||||
* MallocExtension::instance(); it is not possible to have more than
|
||||
* one MallocExtension object in C applications.
|
||||
*/
|
||||
|
||||
#ifndef ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
|
||||
#define ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define kMallocExtensionHistogramSize 64
|
||||
|
||||
int MallocExtension_VerifyAllMemory(void);
|
||||
int MallocExtension_VerifyNewMemory(const void* p);
|
||||
int MallocExtension_VerifyArrayNewMemory(const void* p);
|
||||
int MallocExtension_VerifyMallocMemory(const void* p);
|
||||
int MallocExtension_MallocMemoryStats(int* blocks, size_t* total,
|
||||
int histogram[kMallocExtensionHistogramSize]);
|
||||
|
||||
void MallocExtension_GetStats(char* buffer, int buffer_length);
|
||||
|
||||
/* TODO(csilvers): write a C version of these routines, that perhaps
|
||||
* takes a function ptr and a void *.
|
||||
*/
|
||||
/* void MallocExtension_GetHeapSample(MallocExtensionWriter* result); */
|
||||
/* void MallocExtension_GetHeapGrowthStacks(MallocExtensionWriter* result); */
|
||||
|
||||
int MallocExtension_GetNumericProperty(const char* property, size_t* value);
|
||||
int MallocExtension_SetNumericProperty(const char* property, size_t value);
|
||||
void MallocExtension_MarkThreadIdle(void);
|
||||
void MallocExtension_MarkThreadBusy(void);
|
||||
void MallocExtension_ReleaseToSystem(size_t num_bytes);
|
||||
void MallocExtension_ReleaseFreeMemory(void);
|
||||
size_t MallocExtension_GetEstimatedAllocatedSize(size_t size);
|
||||
size_t MallocExtension_GetAllocatedSize(const void* p);
|
||||
|
||||
/*
|
||||
* NOTE: These enum values MUST be kept in sync with the version in
|
||||
* malloc_extension.h
|
||||
*/
|
||||
typedef enum {
|
||||
MallocExtension_kUnknownOwnership = 0,
|
||||
MallocExtension_kOwned,
|
||||
MallocExtension_kNotOwned
|
||||
} MallocExtension_Ownership;
|
||||
|
||||
MallocExtension_Ownership MallocExtension_GetOwnership(const void* p);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif /* ABSL_BASE_INTERNAL_MALLOC_EXTENSION_C_H_ */
@@ -1,79 +0,0 @@
/*
|
||||
* Copyright 2017 The Abseil Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "absl/base/internal/malloc_extension.h"
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
namespace {
|
||||
|
||||
TEST(MallocExtension, MallocExtension) {
|
||||
void* a = malloc(1000);
|
||||
|
||||
size_t cxx_bytes_used, c_bytes_used;
|
||||
if (!MallocExtension::instance()->GetNumericProperty(
|
||||
"generic.current_allocated_bytes", &cxx_bytes_used)) {
|
||||
EXPECT_TRUE(ABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION);
|
||||
} else {
|
||||
ASSERT_TRUE(MallocExtension::instance()->GetNumericProperty(
|
||||
"generic.current_allocated_bytes", &cxx_bytes_used));
|
||||
#ifndef MEMORY_SANITIZER
|
||||
EXPECT_GT(cxx_bytes_used, 1000);
|
||||
EXPECT_GT(c_bytes_used, 1000);
|
||||
#endif
|
||||
|
||||
EXPECT_TRUE(MallocExtension::instance()->VerifyAllMemory());
|
||||
|
||||
EXPECT_EQ(MallocExtension::kOwned,
|
||||
MallocExtension::instance()->GetOwnership(a));
|
||||
// TODO(csilvers): this relies on undocumented behavior that
|
||||
// GetOwnership works on stack-allocated variables. Use a better test.
|
||||
EXPECT_EQ(MallocExtension::kNotOwned,
|
||||
MallocExtension::instance()->GetOwnership(&cxx_bytes_used));
|
||||
EXPECT_EQ(MallocExtension::kNotOwned,
|
||||
MallocExtension::instance()->GetOwnership(nullptr));
|
||||
EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000);
|
||||
// This is just a sanity check. If we allocated too much, tcmalloc is
|
||||
// broken
|
||||
EXPECT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000);
|
||||
EXPECT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000),
|
||||
1000);
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
void* p = malloc(i);
|
||||
EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(p),
|
||||
MallocExtension::instance()->GetEstimatedAllocatedSize(i));
|
||||
free(p);
|
||||
}
|
||||
}
|
||||
|
||||
free(a);
|
||||
}
|
||||
|
||||
TEST(nallocx, SaneBehavior) {
|
||||
for (size_t size = 0; size < 64 * 1024; ++size) {
|
||||
size_t alloc_size = nallocx(size, 0);
|
||||
EXPECT_LE(size, alloc_size) << "size is " << size;
|
||||
EXPECT_LE(alloc_size, std::max(size + 100, 2 * size)) << "size is " << size;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace base_internal
|
||||
} // namespace absl
@@ -1,574 +0,0 @@
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/base/config.h"
|
||||
|
||||
#if ABSL_HAVE_MMAP
|
||||
// Disable the glibc prototype of mremap(), as older versions of the
|
||||
// system headers define this function with only four arguments,
|
||||
// whereas newer versions allow an optional fifth argument:
|
||||
#define mremap glibc_mremap
|
||||
#include <sys/mman.h>
|
||||
#undef mremap
|
||||
#endif
|
||||
|
||||
#include "absl/base/internal/malloc_hook.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
|
||||
#include "absl/base/call_once.h"
|
||||
#include "absl/base/internal/malloc_hook_invoke.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
#include "absl/base/internal/spinlock.h"
|
||||
#include "absl/base/macros.h"
|
||||
|
||||
// __THROW is defined in glibc systems. It means, counter-intuitively,
|
||||
// "This function will never throw an exception." It's an optional
|
||||
// optimization tool, but we may need to use it to match glibc prototypes.
|
||||
#ifndef __THROW // I guess we're not on a glibc system
|
||||
# define __THROW // __THROW is just an optimization, so ok to make it ""
|
||||
#endif
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
namespace {
|
||||
|
||||
void RemoveInitialHooksAndCallInitializers(); // below.
|
||||
|
||||
absl::once_flag once;
|
||||
|
||||
// These hooks are installed in MallocHook as the only initial hooks. The first
|
||||
// hook that is called will run RemoveInitialHooksAndCallInitializers (see the
|
||||
// definition below) and then redispatch to any malloc hooks installed by
|
||||
// RemoveInitialHooksAndCallInitializers.
|
||||
//
|
||||
// Note(llib): there is a possibility of a race in the event that there are
|
||||
// multiple threads running before the first allocation. This is pretty
|
||||
// difficult to achieve, but if it is then multiple threads may concurrently do
|
||||
// allocations. The first caller will call
|
||||
// RemoveInitialHooksAndCallInitializers via one of the initial hooks. A
|
||||
// concurrent allocation may, depending on timing either:
|
||||
// * still have its initial malloc hook installed, run that and block on waiting
|
||||
// for the first caller to finish its call to
|
||||
// RemoveInitialHooksAndCallInitializers, and proceed normally.
|
||||
// * occur some time during the RemoveInitialHooksAndCallInitializers call, at
|
||||
// which point there could be no initial hooks and the subsequent hooks that
|
||||
// are about to be set up by RemoveInitialHooksAndCallInitializers haven't
|
||||
// been installed yet. I think the worst we can get is that some allocations
|
||||
// will not get reported to some hooks set by the initializers called from
|
||||
// RemoveInitialHooksAndCallInitializers.
|
||||
|
||||
void InitialNewHook(const void* ptr, size_t size) {
|
||||
absl::call_once(once, RemoveInitialHooksAndCallInitializers);
|
||||
MallocHook::InvokeNewHook(ptr, size);
|
||||
}
|
||||
|
||||
void InitialPreMMapHook(const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset) {
|
||||
absl::call_once(once, RemoveInitialHooksAndCallInitializers);
|
||||
MallocHook::InvokePreMmapHook(start, size, protection, flags, fd, offset);
|
||||
}
|
||||
|
||||
void InitialPreSbrkHook(ptrdiff_t increment) {
|
||||
absl::call_once(once, RemoveInitialHooksAndCallInitializers);
|
||||
MallocHook::InvokePreSbrkHook(increment);
|
||||
}
|
||||
|
||||
// This function is called at most once by one of the above initial malloc
|
||||
// hooks. It removes all initial hooks and initializes all other clients that
|
||||
// want to get control at the very first memory allocation. The initializers
|
||||
// may assume that the initial malloc hooks have been removed. The initializers
|
||||
// may set up malloc hooks and allocate memory.
|
||||
void RemoveInitialHooksAndCallInitializers() {
|
||||
ABSL_RAW_CHECK(MallocHook::RemoveNewHook(&InitialNewHook), "");
|
||||
ABSL_RAW_CHECK(MallocHook::RemovePreMmapHook(&InitialPreMMapHook), "");
|
||||
ABSL_RAW_CHECK(MallocHook::RemovePreSbrkHook(&InitialPreSbrkHook), "");
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
// This lock is shared between all implementations of HookList::Add & Remove.
|
||||
// The potential for contention is very small. This needs to be a SpinLock and
|
||||
// not a Mutex since it's possible for Mutex locking to allocate memory (e.g.,
|
||||
// per-thread allocation in debug builds), which could cause infinite recursion.
|
||||
static absl::base_internal::SpinLock hooklist_spinlock(
|
||||
absl::base_internal::kLinkerInitialized);
|
||||
|
||||
template <typename T>
|
||||
bool HookList<T>::Add(T value_as_t) {
|
||||
if (value_as_t == T()) {
|
||||
return false;
|
||||
}
|
||||
absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
|
||||
// Find the first slot in data that is 0.
|
||||
int index = 0;
|
||||
while ((index < kHookListMaxValues) &&
|
||||
(priv_data[index].load(std::memory_order_relaxed) != 0)) {
|
||||
++index;
|
||||
}
|
||||
if (index == kHookListMaxValues) {
|
||||
return false;
|
||||
}
|
||||
int prev_num_hooks = priv_end.load(std::memory_order_acquire);
|
||||
priv_data[index].store(reinterpret_cast<intptr_t>(value_as_t),
|
||||
std::memory_order_release);
|
||||
if (prev_num_hooks <= index) {
|
||||
priv_end.store(index + 1, std::memory_order_release);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
bool HookList<T>::Remove(T value_as_t) {
|
||||
if (value_as_t == T()) {
|
||||
return false;
|
||||
}
|
||||
absl::base_internal::SpinLockHolder l(&hooklist_spinlock);
|
||||
int hooks_end = priv_end.load(std::memory_order_acquire);
|
||||
int index = 0;
|
||||
while (index < hooks_end &&
|
||||
value_as_t != reinterpret_cast<T>(
|
||||
priv_data[index].load(std::memory_order_acquire))) {
|
||||
++index;
|
||||
}
|
||||
if (index == hooks_end) {
|
||||
return false;
|
||||
}
|
||||
priv_data[index].store(0, std::memory_order_release);
|
||||
if (hooks_end == index + 1) {
|
||||
// Adjust hooks_end down to the lowest possible value.
|
||||
hooks_end = index;
|
||||
while ((hooks_end > 0) &&
|
||||
(priv_data[hooks_end - 1].load(std::memory_order_acquire) == 0)) {
|
||||
--hooks_end;
|
||||
}
|
||||
priv_end.store(hooks_end, std::memory_order_release);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
int HookList<T>::Traverse(T* output_array, int n) const {
|
||||
int hooks_end = priv_end.load(std::memory_order_acquire);
|
||||
int actual_hooks_end = 0;
|
||||
for (int i = 0; i < hooks_end && n > 0; ++i) {
|
||||
T data = reinterpret_cast<T>(priv_data[i].load(std::memory_order_acquire));
|
||||
if (data != T()) {
|
||||
*output_array++ = data;
|
||||
++actual_hooks_end;
|
||||
--n;
|
||||
}
|
||||
}
|
||||
return actual_hooks_end;
|
||||
}
|
||||
|
||||
// Initialize a HookList (optionally with the given initial_value in index 0).
|
||||
#define INIT_HOOK_LIST { {0}, {{}} }
|
||||
#define INIT_HOOK_LIST_WITH_VALUE(initial_value) \
|
||||
{ {1}, { {reinterpret_cast<intptr_t>(initial_value)} } }
|
||||
|
||||
// Explicit instantiation for malloc_hook_test.cc. This ensures all the methods
|
||||
// are instantiated.
|
||||
template struct HookList<MallocHook::NewHook>;
|
||||
|
||||
HookList<MallocHook::NewHook> new_hooks_ =
|
||||
INIT_HOOK_LIST_WITH_VALUE(&InitialNewHook);
|
||||
HookList<MallocHook::DeleteHook> delete_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::SampledNewHook> sampled_new_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::PreMmapHook> premmap_hooks_ =
|
||||
INIT_HOOK_LIST_WITH_VALUE(&InitialPreMMapHook);
|
||||
HookList<MallocHook::MmapHook> mmap_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::MunmapHook> munmap_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::MremapHook> mremap_hooks_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::PreSbrkHook> presbrk_hooks_ =
|
||||
INIT_HOOK_LIST_WITH_VALUE(InitialPreSbrkHook);
|
||||
HookList<MallocHook::SbrkHook> sbrk_hooks_ = INIT_HOOK_LIST;
|
||||
|
||||
// These lists contain either 0 or 1 hooks.
|
||||
HookList<MallocHook::MmapReplacement> mmap_replacement_ = INIT_HOOK_LIST;
|
||||
HookList<MallocHook::MunmapReplacement> munmap_replacement_ = INIT_HOOK_LIST;
|
||||
|
||||
#undef INIT_HOOK_LIST_WITH_VALUE
|
||||
#undef INIT_HOOK_LIST
|
||||
|
||||
bool MallocHook::AddNewHook(NewHook hook) { return new_hooks_.Add(hook); }
|
||||
|
||||
bool MallocHook::RemoveNewHook(NewHook hook) { return new_hooks_.Remove(hook); }
|
||||
|
||||
bool MallocHook::AddDeleteHook(DeleteHook hook) {
|
||||
return delete_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveDeleteHook(DeleteHook hook) {
|
||||
return delete_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddSampledNewHook(SampledNewHook hook) {
|
||||
return sampled_new_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveSampledNewHook(SampledNewHook hook) {
|
||||
return sampled_new_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddSampledDeleteHook(SampledDeleteHook hook) {
|
||||
return sampled_delete_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveSampledDeleteHook(SampledDeleteHook hook) {
|
||||
return sampled_delete_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddPreMmapHook(PreMmapHook hook) {
|
||||
return premmap_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemovePreMmapHook(PreMmapHook hook) {
|
||||
return premmap_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::SetMmapReplacement(MmapReplacement hook) {
|
||||
// NOTE this is a best effort CHECK. Concurrent sets could succeed since
|
||||
// this test is outside of the Add spin lock.
|
||||
ABSL_RAW_CHECK(mmap_replacement_.empty(),
|
||||
"Only one MMapReplacement is allowed.");
|
||||
return mmap_replacement_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveMmapReplacement(MmapReplacement hook) {
|
||||
return mmap_replacement_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddMmapHook(MmapHook hook) { return mmap_hooks_.Add(hook); }
|
||||
|
||||
bool MallocHook::RemoveMmapHook(MmapHook hook) {
|
||||
return mmap_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::SetMunmapReplacement(MunmapReplacement hook) {
|
||||
// NOTE this is a best effort CHECK. Concurrent sets could succeed since
|
||||
// this test is outside of the Add spin lock.
|
||||
ABSL_RAW_CHECK(munmap_replacement_.empty(),
|
||||
"Only one MunmapReplacement is allowed.");
|
||||
return munmap_replacement_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveMunmapReplacement(MunmapReplacement hook) {
|
||||
return munmap_replacement_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddMunmapHook(MunmapHook hook) {
|
||||
return munmap_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveMunmapHook(MunmapHook hook) {
|
||||
return munmap_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddMremapHook(MremapHook hook) {
|
||||
return mremap_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemoveMremapHook(MremapHook hook) {
|
||||
return mremap_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddPreSbrkHook(PreSbrkHook hook) {
|
||||
return presbrk_hooks_.Add(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::RemovePreSbrkHook(PreSbrkHook hook) {
|
||||
return presbrk_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
bool MallocHook::AddSbrkHook(SbrkHook hook) { return sbrk_hooks_.Add(hook); }
|
||||
|
||||
bool MallocHook::RemoveSbrkHook(SbrkHook hook) {
|
||||
return sbrk_hooks_.Remove(hook);
|
||||
}
|
||||
|
||||
// Note: embedding the function calls inside the traversal of HookList would be
|
||||
// very confusing, as it is legal for a hook to remove itself and add other
|
||||
// hooks. Doing traversal first, and then calling the hooks ensures we only
|
||||
// call the hooks registered at the start.
|
||||
#define INVOKE_HOOKS(HookType, hook_list, args) \
|
||||
do { \
|
||||
HookType hooks[kHookListMaxValues]; \
|
||||
int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
|
||||
for (int i = 0; i < num_hooks; ++i) { \
|
||||
(*hooks[i]) args; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// There should only be one replacement. Return the result of the first
|
||||
// one, or false if there is none.
|
||||
#define INVOKE_REPLACEMENT(HookType, hook_list, args) \
|
||||
do { \
|
||||
HookType hooks[kHookListMaxValues]; \
|
||||
int num_hooks = hook_list.Traverse(hooks, kHookListMaxValues); \
|
||||
return (num_hooks > 0 && (*hooks[0])args); \
|
||||
} while (0)
|
||||
|
||||
void MallocHook::InvokeNewHookSlow(const void* ptr, size_t size) {
|
||||
INVOKE_HOOKS(NewHook, new_hooks_, (ptr, size));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeDeleteHookSlow(const void* ptr) {
|
||||
INVOKE_HOOKS(DeleteHook, delete_hooks_, (ptr));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc) {
|
||||
INVOKE_HOOKS(SampledNewHook, sampled_new_hooks_, (sampled_alloc));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeSampledDeleteHookSlow(AllocHandle handle) {
|
||||
INVOKE_HOOKS(SampledDeleteHook, sampled_delete_hooks_, (handle));
|
||||
}
|
||||
|
||||
void MallocHook::InvokePreMmapHookSlow(const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset) {
|
||||
INVOKE_HOOKS(PreMmapHook, premmap_hooks_, (start, size, protection, flags, fd,
|
||||
offset));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeMmapHookSlow(const void* result,
|
||||
const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset) {
|
||||
INVOKE_HOOKS(MmapHook, mmap_hooks_, (result, start, size, protection, flags,
|
||||
fd, offset));
|
||||
}
|
||||
|
||||
bool MallocHook::InvokeMmapReplacementSlow(const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset,
|
||||
void** result) {
|
||||
INVOKE_REPLACEMENT(MmapReplacement, mmap_replacement_,
|
||||
(start, size, protection, flags, fd, offset, result));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeMunmapHookSlow(const void* start, size_t size) {
|
||||
INVOKE_HOOKS(MunmapHook, munmap_hooks_, (start, size));
|
||||
}
|
||||
|
||||
bool MallocHook::InvokeMunmapReplacementSlow(const void* start,
|
||||
size_t size,
|
||||
int* result) {
|
||||
INVOKE_REPLACEMENT(MunmapReplacement, munmap_replacement_,
|
||||
(start, size, result));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeMremapHookSlow(const void* result,
|
||||
const void* old_addr,
|
||||
size_t old_size,
|
||||
size_t new_size,
|
||||
int flags,
|
||||
const void* new_addr) {
|
||||
INVOKE_HOOKS(MremapHook, mremap_hooks_, (result, old_addr, old_size, new_size,
|
||||
flags, new_addr));
|
||||
}
|
||||
|
||||
void MallocHook::InvokePreSbrkHookSlow(ptrdiff_t increment) {
|
||||
INVOKE_HOOKS(PreSbrkHook, presbrk_hooks_, (increment));
|
||||
}
|
||||
|
||||
void MallocHook::InvokeSbrkHookSlow(const void* result, ptrdiff_t increment) {
|
||||
INVOKE_HOOKS(SbrkHook, sbrk_hooks_, (result, increment));
|
||||
}
|
||||
|
||||
#undef INVOKE_HOOKS
|
||||
#undef INVOKE_REPLACEMENT
|
||||
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(malloc_hook);
|
||||
ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(malloc_hook);
|
||||
// actual functions are in this file, malloc_hook.cc, and low_level_alloc.cc
|
||||
ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(google_malloc);
|
||||
ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(google_malloc);
|
||||
ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(blink_malloc);
|
||||
ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(blink_malloc);
|
||||
|
||||
#define ADDR_IN_ATTRIBUTE_SECTION(addr, name) \
|
||||
(reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_START(name)) <= \
|
||||
reinterpret_cast<uintptr_t>(addr) && \
|
||||
reinterpret_cast<uintptr_t>(addr) < \
|
||||
reinterpret_cast<uintptr_t>(ABSL_ATTRIBUTE_SECTION_STOP(name)))
|
||||
|
||||
// Return true iff 'caller' is a return address within a function
|
||||
// that calls one of our hooks via MallocHook::Invoke*.
|
||||
// A helper for GetCallerStackTrace.
|
||||
static inline bool InHookCaller(const void* caller) {
|
||||
return ADDR_IN_ATTRIBUTE_SECTION(caller, google_malloc) ||
|
||||
ADDR_IN_ATTRIBUTE_SECTION(caller, malloc_hook) ||
|
||||
ADDR_IN_ATTRIBUTE_SECTION(caller, blink_malloc);
|
||||
|
||||
// We can use one section for everything except tcmalloc_or_debug
|
||||
// due to its special linkage mode, which prevents merging of the sections.
|
||||
}
|
||||
|
||||
#undef ADDR_IN_ATTRIBUTE_SECTION
|
||||
|
||||
static absl::once_flag in_hook_caller_once;
|
||||
|
||||
static void InitializeInHookCaller() {
|
||||
ABSL_INIT_ATTRIBUTE_SECTION_VARS(malloc_hook);
|
||||
if (ABSL_ATTRIBUTE_SECTION_START(malloc_hook) ==
|
||||
ABSL_ATTRIBUTE_SECTION_STOP(malloc_hook)) {
|
||||
ABSL_RAW_LOG(ERROR,
|
||||
"malloc_hook section is missing, "
|
||||
"thus InHookCaller is broken!");
|
||||
}
|
||||
ABSL_INIT_ATTRIBUTE_SECTION_VARS(google_malloc);
|
||||
if (ABSL_ATTRIBUTE_SECTION_START(google_malloc) ==
|
||||
ABSL_ATTRIBUTE_SECTION_STOP(google_malloc)) {
|
||||
ABSL_RAW_LOG(ERROR,
|
||||
"google_malloc section is missing, "
|
||||
"thus InHookCaller is broken!");
|
||||
}
|
||||
ABSL_INIT_ATTRIBUTE_SECTION_VARS(blink_malloc);
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
int MallocHook::GetCallerStackTrace(void** result, int max_depth,
|
||||
int skip_count,
|
||||
GetStackTraceFn get_stack_trace_fn) {
|
||||
if (!ABSL_HAVE_ATTRIBUTE_SECTION) {
|
||||
// Fall back to get_stack_trace_fn and good old but fragile frame skip
|
||||
// counts.
|
||||
// Note: this path is inaccurate when a hook is not called directly by an
|
||||
// allocation function but is daisy-chained through another hook,
|
||||
// search for MallocHook::(Get|Set|Invoke)* to find such cases.
|
||||
#ifdef NDEBUG
|
||||
return get_stack_trace_fn(result, max_depth, skip_count);
|
||||
#else
|
||||
return get_stack_trace_fn(result, max_depth, skip_count + 1);
|
||||
#endif
|
||||
// due to -foptimize-sibling-calls in opt mode
|
||||
// there's no need for extra frame skip here then
|
||||
}
|
||||
absl::call_once(in_hook_caller_once, InitializeInHookCaller);
|
||||
// MallocHook caller determination via InHookCaller works, use it:
|
||||
static const int kMaxSkip = 32 + 6 + 3;
|
||||
// Constant tuned to do just one get_stack_trace_fn call below in practice
|
||||
// and not get many frames that we don't actually need:
|
||||
// currently max passed max_depth is 32,
|
||||
// max passed/needed skip_count is 6
|
||||
// and 3 is to account for some hook daisy chaining.
|
||||
static const int kStackSize = kMaxSkip + 1;
|
||||
void* stack[kStackSize];
|
||||
int depth =
|
||||
get_stack_trace_fn(stack, kStackSize, 1); // skip this function frame
|
||||
if (depth == 0)
|
||||
// silently propagate cases when get_stack_trace_fn does not work
|
||||
return 0;
|
||||
for (int i = depth - 1; i >= 0; --i) { // stack[0] is our immediate caller
|
||||
if (InHookCaller(stack[i])) {
|
||||
i += 1; // skip hook caller frame
|
||||
depth -= i; // correct depth
|
||||
if (depth > max_depth) depth = max_depth;
|
||||
std::copy(stack + i, stack + i + depth, result);
|
||||
if (depth < max_depth && depth + i == kStackSize) {
|
||||
// get frames for the missing depth
|
||||
depth += get_stack_trace_fn(result + depth, max_depth - depth,
|
||||
1 + kStackSize);
|
||||
}
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
ABSL_RAW_LOG(WARNING,
|
||||
"Hooked allocator frame not found, returning empty trace");
|
||||
// If this happens try increasing kMaxSkip
|
||||
// or else something must be wrong with InHookCaller,
|
||||
// e.g. for every section used in InHookCaller
|
||||
// all functions in that section must be inside the same library.
|
||||
return 0;
|
||||
}
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
// On systems where we know how, we override mmap/munmap/mremap/sbrk
|
||||
// to provide support for calling the related hooks (in addition,
|
||||
// of course, to doing what these functions normally do).
|
||||
|
||||
// The ABSL_MALLOC_HOOK_MMAP_DISABLE macro disables mmap/munmap interceptors.
|
||||
// Dynamic tools that intercept mmap/munmap can't be linked together with
|
||||
// malloc_hook interceptors. We disable the malloc_hook interceptors for the
|
||||
// widely-used dynamic tools, i.e. ThreadSanitizer and MemorySanitizer, but
|
||||
// still allow users to disable this in special cases that can't be easily
|
||||
// detected during compilation, via -DABSL_MALLOC_HOOK_MMAP_DISABLE or #define
|
||||
// ABSL_MALLOC_HOOK_MMAP_DISABLE.
|
||||
//
|
||||
// TODO(absl-team): Remove MALLOC_HOOK_MMAP_DISABLE in CROSSTOOL for tsan and
|
||||
// msan config; Replace MALLOC_HOOK_MMAP_DISABLE with
|
||||
// ABSL_MALLOC_HOOK_MMAP_DISABLE for other special cases.
|
||||
#if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) && \
|
||||
!defined(ABSL_MALLOC_HOOK_MMAP_DISABLE) && !defined(__ANDROID__) && \
|
||||
defined(__linux__)
|
||||
#include "absl/base/internal/malloc_hook_mmap_linux.inc"
|
||||
|
||||
#elif ABSL_HAVE_MMAP
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
// static
|
||||
void* MallocHook::UnhookedMMap(void* start, size_t size, int protection,
|
||||
int flags, int fd, off_t offset) {
|
||||
void* result;
|
||||
if (!MallocHook::InvokeMmapReplacement(
|
||||
start, size, protection, flags, fd, offset, &result)) {
|
||||
result = mmap(start, size, protection, flags, fd, offset);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// static
|
||||
int MallocHook::UnhookedMUnmap(void* start, size_t size) {
|
||||
int result;
|
||||
if (!MallocHook::InvokeMunmapReplacement(start, size, &result)) {
|
||||
result = munmap(start, size);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace base_internal
|
||||
} // namespace absl
|
||||
|
||||
#endif
@@ -1,284 +0,0 @@
//
|
||||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
// Some of our malloc implementations can invoke the following hooks whenever
|
||||
// memory is allocated or deallocated. MallocHook is thread-safe, and things
|
||||
// you do before calling AddFooHook(MyHook) are visible to any resulting calls
|
||||
// to MyHook. Hooks must be thread-safe. If you write:
|
||||
//
|
||||
// CHECK(MallocHook::AddNewHook(&MyNewHook));
|
||||
//
|
||||
// MyNewHook will be invoked in subsequent calls in the current thread, but
|
||||
// there are no guarantees on when it might be invoked in other threads.
|
||||
//
|
||||
// There are a limited number of slots available for each hook type. Add*Hook
|
||||
// will return false if there are no slots available. Remove*Hook will return
|
||||
// false if the given hook was not already installed.
|
||||
//
|
||||
// The order in which individual hooks are called in Invoke*Hook is undefined.
|
||||
//
|
||||
// It is safe for a hook to remove itself within Invoke*Hook and add other
|
||||
// hooks. Any hooks added inside a hook invocation (for the same hook type)
|
||||
// will not be invoked for the current invocation.
|
||||
//
|
||||
// One important user of these hooks is the heap profiler.
|
||||
//
|
||||
// CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be
|
||||
// directly in the code of the (de)allocation function that is provided to the
|
||||
// user and that function must have an ABSL_ATTRIBUTE_SECTION(malloc_hook)
|
||||
// attribute.
|
||||
//
|
||||
// Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you
|
||||
// need to invoke a hook (which you shouldn't unless you're part of tcmalloc),
|
||||
// be sure to #include malloc_hook-inl.h in addition to malloc_hook.h.
|
||||
//
|
||||
// NOTE FOR C USERS: If you want to use malloc_hook functionality from
|
||||
// a C program, #include malloc_hook_c.h instead of this file.
|
||||
//
|
||||
// IWYU pragma: private, include "base/malloc_hook.h"
|
||||
|
||||
#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
|
||||
#define ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <cstddef>
|
||||
|
||||
#include "absl/base/config.h"
|
||||
#include "absl/base/internal/malloc_hook_c.h"
|
||||
#include "absl/base/port.h"
|
||||
|
||||
namespace absl {
|
||||
namespace base_internal {
|
||||
|
||||
// Note: malloc_hook_c.h defines MallocHook_*Hook and
|
||||
// MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook
|
||||
// class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h
|
||||
// for details of these types/functions.
|
||||
|
||||
class MallocHook {
|
||||
public:
|
||||
// The NewHook is invoked whenever an object is being allocated.
|
||||
// Object pointer and size are passed in.
|
||||
// It may be passed null pointer if the allocator returned null.
|
||||
typedef MallocHook_NewHook NewHook;
|
||||
static bool AddNewHook(NewHook hook);
|
||||
static bool RemoveNewHook(NewHook hook);
|
||||
inline static void InvokeNewHook(const void* ptr, size_t size);
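// A minimal usage sketch (illustrative only): a hook that counts allocations,
// paired with its later removal. Hooks must not allocate, so a plain atomic
// counter is used:
//
//   std::atomic<size_t> g_alloc_count{0};
//   void CountingNewHook(const void* ptr, size_t size) {
//     if (ptr != nullptr) g_alloc_count.fetch_add(1, std::memory_order_relaxed);
//   }
//   ...
//   CHECK(MallocHook::AddNewHook(&CountingNewHook));
//   ...
//   CHECK(MallocHook::RemoveNewHook(&CountingNewHook));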
|
||||
|
||||
// The DeleteHook is invoked whenever an object is being deallocated.
|
||||
// Object pointer is passed in.
|
||||
// It may be passed null pointer if the caller is trying to delete null.
|
||||
typedef MallocHook_DeleteHook DeleteHook;
|
||||
static bool AddDeleteHook(DeleteHook hook);
|
||||
static bool RemoveDeleteHook(DeleteHook hook);
|
||||
inline static void InvokeDeleteHook(const void* ptr);
|
||||
|
||||
// The SampledNewHook is invoked for some subset of object allocations
|
||||
// according to the sampling policy of an allocator such as tcmalloc.
|
||||
// SampledAlloc has the following fields:
|
||||
// * AllocHandle handle: to be set to an effectively unique value (in this
|
||||
// process) by allocator.
|
||||
// * size_t allocated_size: space actually used by allocator to host the
|
||||
// object. Not necessarily equal to the requested size due to alignment
|
||||
// and other reasons.
|
||||
// * double weight: the expected number of allocations matching this profile
|
||||
// that this sample represents.
|
||||
// * int stack_depth and const void* stack: invocation stack for
|
||||
// the allocation.
|
||||
// The allocator invoking the hook should record the handle value and later
|
||||
// call InvokeSampledDeleteHook() with that value.
|
||||
typedef MallocHook_SampledNewHook SampledNewHook;
|
||||
typedef MallocHook_SampledAlloc SampledAlloc;
|
||||
static bool AddSampledNewHook(SampledNewHook hook);
|
||||
static bool RemoveSampledNewHook(SampledNewHook hook);
|
||||
inline static void InvokeSampledNewHook(const SampledAlloc* sampled_alloc);
|
||||
|
||||
// The SampledDeleteHook is invoked whenever an object previously chosen
|
||||
// by an allocator for sampling is being deallocated.
|
||||
// The handle identifying the object --as previously chosen by
|
||||
// InvokeSampledNewHook()-- is passed in.
|
||||
typedef MallocHook_SampledDeleteHook SampledDeleteHook;
|
||||
typedef MallocHook_AllocHandle AllocHandle;
|
||||
static bool AddSampledDeleteHook(SampledDeleteHook hook);
|
||||
static bool RemoveSampledDeleteHook(SampledDeleteHook hook);
|
||||
inline static void InvokeSampledDeleteHook(AllocHandle handle);
|
||||
|
||||
// The PreMmapHook is invoked with mmap's or mmap64's arguments just
|
||||
// before the mmap/mmap64 call is actually made. Such a hook may be useful
|
||||
// in memory limited contexts, to catch allocations that will exceed
|
||||
// a memory limit, and take outside actions to increase that limit.
|
||||
typedef MallocHook_PreMmapHook PreMmapHook;
|
||||
static bool AddPreMmapHook(PreMmapHook hook);
|
||||
static bool RemovePreMmapHook(PreMmapHook hook);
|
||||
inline static void InvokePreMmapHook(const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset);
|
||||
|
||||
// The MmapReplacement is invoked with mmap's arguments and place to put the
|
||||
// result into after the PreMmapHook but before the mmap/mmap64 call is
|
||||
// actually made.
|
||||
// The MmapReplacement should return true if it handled the call, or false
|
||||
// if it is still necessary to call mmap/mmap64.
|
||||
// This should be used only by experts, and users must be
// extremely careful to avoid recursive calls to mmap. The replacement
|
||||
// should be async signal safe.
|
||||
// Only one MmapReplacement is supported. After setting an MmapReplacement
|
||||
// you must call RemoveMmapReplacement before calling SetMmapReplacement
|
||||
// again.
|
||||
typedef MallocHook_MmapReplacement MmapReplacement;
|
||||
static bool SetMmapReplacement(MmapReplacement hook);
|
||||
static bool RemoveMmapReplacement(MmapReplacement hook);
|
||||
inline static bool InvokeMmapReplacement(const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset,
|
||||
void** result);
|
||||
|
||||
|
||||
// The MmapHook is invoked with mmap's return value and arguments whenever
|
||||
// a region of memory has been just mapped.
|
||||
// It may be passed MAP_FAILED if the mmap failed.
|
||||
typedef MallocHook_MmapHook MmapHook;
|
||||
static bool AddMmapHook(MmapHook hook);
|
||||
static bool RemoveMmapHook(MmapHook hook);
|
||||
inline static void InvokeMmapHook(const void* result,
|
||||
const void* start,
|
||||
size_t size,
|
||||
int protection,
|
||||
int flags,
|
||||
int fd,
|
||||
off_t offset);
|
||||
|
||||
// The MunmapReplacement is invoked with munmap's arguments and place to put
|
||||
// the result into just before the munmap call is actually made.
|
||||
// The MunmapReplacement should return true if it handled the call, or false
|
||||
// if it is still necessary to call munmap.
|
||||
// This should be used only by experts. The replacement should be
|
||||
// async signal safe.
|
||||
// Only one MunmapReplacement is supported. After setting an
|
||||
// MunmapReplacement you must call RemoveMunmapReplacement before
|
||||
// calling SetMunmapReplacement again.
|
||||
typedef MallocHook_MunmapReplacement MunmapReplacement;
|
||||
static bool SetMunmapReplacement(MunmapReplacement hook);
|
||||
static bool RemoveMunmapReplacement(MunmapReplacement hook);
|
||||
inline static bool InvokeMunmapReplacement(const void* start,
|
||||
size_t size,
|
||||
int* result);
|
||||
|
||||
// The MunmapHook is invoked with munmap's arguments just before the munmap
|
||||
// call is actually made.
|
||||
// TODO(maxim): Rename this to PreMunmapHook for consistency with PreMmapHook
|
||||
// and PreSbrkHook.
|
||||
typedef MallocHook_MunmapHook MunmapHook;
|
||||
static bool AddMunmapHook(MunmapHook hook);
|
||||
static bool RemoveMunmapHook(MunmapHook hook);
|
||||
inline static void InvokeMunmapHook(const void* start, size_t size);
|
||||
|
||||
// The MremapHook is invoked with mremap's return value and arguments
|
||||
// whenever a region of memory has been just remapped.
|
||||
typedef MallocHook_MremapHook MremapHook;
|
||||
static bool AddMremapHook(MremapHook hook);
|
||||
static bool RemoveMremapHook(MremapHook hook);
|
||||
inline static void InvokeMremapHook(const void* result,
|
||||
const void* old_addr,
|
||||
size_t old_size,
|
||||
size_t new_size,
|
||||
int flags,
|
||||
const void* new_addr);
|
||||
|
||||
// The PreSbrkHook is invoked with sbrk's argument just before sbrk is called
|
||||
// -- except when the increment is 0. This is because sbrk(0) is often called
|
||||
// to get the top of the memory stack, and is not actually a
|
||||
// memory-allocation call. It may be useful in memory-limited contexts,
|
||||
// to catch allocations that will exceed the limit and take outside
|
||||
// actions to increase such a limit.
|
||||
typedef MallocHook_PreSbrkHook PreSbrkHook;
|
||||
static bool AddPreSbrkHook(PreSbrkHook hook);
|
||||
static bool RemovePreSbrkHook(PreSbrkHook hook);
|
||||
inline static void InvokePreSbrkHook(ptrdiff_t increment);
|
||||
|
||||
// The SbrkHook is invoked with sbrk's result and argument whenever sbrk
|
||||
// has just executed -- except when the increment is 0.
|
||||
// This is because sbrk(0) is often called to get the top of the memory stack,
|
||||
// and is not actually a memory-allocation call.
|
||||
typedef MallocHook_SbrkHook SbrkHook;
|
||||
static bool AddSbrkHook(SbrkHook hook);
|
||||
static bool RemoveSbrkHook(SbrkHook hook);
|
||||
inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment);
|
||||
|
||||
// Pointer to a absl::GetStackTrace implementation, following the API in
|
||||
// base/stacktrace.h.
|
||||
using GetStackTraceFn = int (*)(void**, int, int);
|
||||
|
||||
// Get the current stack trace. Try to skip all routines up to and
|
||||
// including the caller of MallocHook::Invoke*.
|
||||
// Use "skip_count" (similarly to absl::GetStackTrace from stacktrace.h)
|
||||
// as a hint about how many routines to skip if better information
|
||||
// is not available.
|
||||
// Stack trace is filled into *result up to the size of max_depth.
|
||||
// The actual number of stack frames filled is returned.
|
||||
static int GetCallerStackTrace(void** result, int max_depth, int skip_count,
|
||||
GetStackTraceFn get_stack_trace_fn);
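A hedged sketch of the intended usage: a NewHook that records the allocating call stack through GetCallerStackTrace. The hook name and the side-table comment are illustrative; absl::GetStackTrace from absl/debugging/stacktrace.h matches the GetStackTraceFn signature above.

    #include "absl/debugging/stacktrace.h"

    void StackRecordingNewHook(const void* ptr, size_t size) {
      void* frames[32];
      int depth = absl::base_internal::MallocHook::GetCallerStackTrace(
          frames, 32, /*skip_count=*/1, absl::GetStackTrace);
      // ... store (ptr, size, frames[0..depth)) in some side table ...
      (void)ptr; (void)size; (void)depth;
    }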

#if ABSL_HAVE_MMAP
// Unhooked versions of mmap() and munmap(). These should be used
// only by experts, since they bypass heapchecking, etc.
// Note: These do not run hooks, but they still use the MmapReplacement
// and MunmapReplacement.
static void* UnhookedMMap(void* start, size_t size, int protection, int flags,
                          int fd, off_t offset);
static int UnhookedMUnmap(void* start, size_t size);
#endif
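Where mmap is available, the unhooked variants map memory without notifying the hooks above; a minimal sketch (the helper name and the flag choices are ordinary Linux values picked for illustration, not mandated by this API):

    #include <sys/mman.h>

    void* QuietReserve(size_t bytes) {
      // Bypasses the PreMmap/Mmap hooks; still honors an installed MmapReplacement.
      return absl::base_internal::MallocHook::UnhookedMMap(
          nullptr, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
          /*fd=*/-1, /*offset=*/0);
    }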

private:
// Slow path versions of Invoke*Hook.
static void InvokeNewHookSlow(const void* ptr,
                              size_t size) ABSL_ATTRIBUTE_COLD;
static void InvokeDeleteHookSlow(const void* ptr) ABSL_ATTRIBUTE_COLD;
static void InvokeSampledNewHookSlow(const SampledAlloc* sampled_alloc)
    ABSL_ATTRIBUTE_COLD;
static void InvokeSampledDeleteHookSlow(AllocHandle handle)
    ABSL_ATTRIBUTE_COLD;
static void InvokePreMmapHookSlow(const void* start, size_t size,
                                  int protection, int flags, int fd,
                                  off_t offset) ABSL_ATTRIBUTE_COLD;
static void InvokeMmapHookSlow(const void* result, const void* start,
                               size_t size, int protection, int flags, int fd,
                               off_t offset) ABSL_ATTRIBUTE_COLD;
static bool InvokeMmapReplacementSlow(const void* start, size_t size,
                                      int protection, int flags, int fd,
                                      off_t offset,
                                      void** result) ABSL_ATTRIBUTE_COLD;
static void InvokeMunmapHookSlow(const void* ptr,
                                 size_t size) ABSL_ATTRIBUTE_COLD;
static bool InvokeMunmapReplacementSlow(const void* ptr, size_t size,
                                        int* result) ABSL_ATTRIBUTE_COLD;
static void InvokeMremapHookSlow(const void* result, const void* old_addr,
                                 size_t old_size, size_t new_size, int flags,
                                 const void* new_addr) ABSL_ATTRIBUTE_COLD;
static void InvokePreSbrkHookSlow(ptrdiff_t increment) ABSL_ATTRIBUTE_COLD;
static void InvokeSbrkHookSlow(const void* result,
                               ptrdiff_t increment) ABSL_ATTRIBUTE_COLD;
};

}  // namespace base_internal
}  // namespace absl
#endif  // ABSL_BASE_INTERNAL_MALLOC_HOOK_H_
absl/base/internal/malloc_hook_c.h (deleted)
@ -1,69 +0,0 @@
/*
 * Copyright 2017 The Abseil Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * C shims for the C++ malloc_hook.h. See malloc_hook.h for details
 * on how to use these.
 */
#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_
#define ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

typedef int (*MallocHook_GetStackTraceFn)(void**, int, int);
typedef void (*MallocHook_NewHook)(const void* ptr, size_t size);
typedef void (*MallocHook_DeleteHook)(const void* ptr);
typedef int64_t MallocHook_AllocHandle;
typedef struct {
  /* See malloc_hook.h for documentation for this struct. */
  MallocHook_AllocHandle handle;
  size_t allocated_size;
  double weight;
  int stack_depth;
  const void* stack;
} MallocHook_SampledAlloc;
typedef void (*MallocHook_SampledNewHook)(
    const MallocHook_SampledAlloc* sampled_alloc);
typedef void (*MallocHook_SampledDeleteHook)(MallocHook_AllocHandle handle);
typedef void (*MallocHook_PreMmapHook)(const void* start, size_t size,
                                       int protection, int flags, int fd,
                                       off_t offset);
typedef void (*MallocHook_MmapHook)(const void* result, const void* start,
                                    size_t size, int protection, int flags,
                                    int fd, off_t offset);
typedef int (*MallocHook_MmapReplacement)(const void* start, size_t size,
                                          int protection, int flags, int fd,
                                          off_t offset, void** result);
typedef void (*MallocHook_MunmapHook)(const void* start, size_t size);
typedef int (*MallocHook_MunmapReplacement)(const void* start, size_t size,
                                            int* result);
typedef void (*MallocHook_MremapHook)(const void* result, const void* old_addr,
                                      size_t old_size, size_t new_size,
                                      int flags, const void* new_addr);
typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment);
typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment);

#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */

#endif /* ABSL_BASE_INTERNAL_MALLOC_HOOK_C_H_ */
absl/base/internal/malloc_hook_invoke.h (deleted)
@ -1,198 +0,0 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// This has the implementation details of malloc_hook that are needed
// to use malloc-hook inside the tcmalloc system. It does not hold
// any of the client-facing calls that are used to add new hooks.
//
// IWYU pragma: private, include "base/malloc_hook-inl.h"

#ifndef ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
#define ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_

#include <sys/types.h>
#include <atomic>
#include <cstddef>

#include "absl/base/internal/malloc_hook.h"

namespace absl {
namespace base_internal {

// Maximum of 7 hooks means that HookList is 8 words.
static constexpr int kHookListMaxValues = 7;

// HookList: a class that provides synchronized insertions and removals and
// lockless traversal. Most of the implementation is in malloc_hook.cc.
template <typename T>
struct HookList {
  static_assert(sizeof(T) <= sizeof(intptr_t), "T_should_fit_in_intptr_t");

  // Adds value to the list. Note that duplicates are allowed. Thread-safe and
  // blocking (acquires hooklist_spinlock). Returns true on success; false
  // otherwise (failures include invalid value and no space left).
  bool Add(T value);

  // Removes the first entry matching value from the list. Thread-safe and
  // blocking (acquires hooklist_spinlock). Returns true on success; false
  // otherwise (failures include invalid value and no value found).
  bool Remove(T value);

  // Store up to n values of the list in output_array, and return the number of
  // elements stored. Thread-safe and non-blocking. This is fast (one memory
  // access) if the list is empty.
  int Traverse(T* output_array, int n) const;

  // Fast inline implementation for fast path of Invoke*Hook.
  bool empty() const {
    // empty() is only used as an optimization to determine if we should call
    // Traverse which has proper acquire loads. Memory reordering around a
    // call to empty will either lead to an unnecessary Traverse call, or will
    // miss invoking hooks, neither of which is a problem.
    return priv_end.load(std::memory_order_relaxed) == 0;
  }

  // This internal data is not private so that the class is an aggregate and can
  // be initialized by the linker. Don't access this directly. Use the
  // INIT_HOOK_LIST macro in malloc_hook.cc.

  // One more than the index of the last valid element in priv_data. During
  // 'Remove' this may be past the last valid element in priv_data, but
  // subsequent values will be 0.
  std::atomic<int> priv_end;
  std::atomic<intptr_t> priv_data[kHookListMaxValues];
};
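For orientation, the slow-path implementations in malloc_hook.cc use this structure roughly as sketched below, against the extern hook lists declared just after this struct; the sketch is illustrative and is not the actual .cc code:

    // Illustrative slow path: snapshot the registered hooks, then call each one.
    void InvokeNewHookSlowSketch(const void* ptr, size_t size) {
      using absl::base_internal::MallocHook;
      MallocHook::NewHook hooks[absl::base_internal::kHookListMaxValues];
      int n = absl::base_internal::new_hooks_.Traverse(
          hooks, absl::base_internal::kHookListMaxValues);
      for (int i = 0; i < n; ++i) hooks[i](ptr, size);
    }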

extern template struct HookList<MallocHook::NewHook>;

extern HookList<MallocHook::NewHook> new_hooks_;
extern HookList<MallocHook::DeleteHook> delete_hooks_;
extern HookList<MallocHook::SampledNewHook> sampled_new_hooks_;
extern HookList<MallocHook::SampledDeleteHook> sampled_delete_hooks_;
extern HookList<MallocHook::PreMmapHook> premmap_hooks_;
extern HookList<MallocHook::MmapHook> mmap_hooks_;
extern HookList<MallocHook::MmapReplacement> mmap_replacement_;
extern HookList<MallocHook::MunmapHook> munmap_hooks_;
extern HookList<MallocHook::MunmapReplacement> munmap_replacement_;
extern HookList<MallocHook::MremapHook> mremap_hooks_;
extern HookList<MallocHook::PreSbrkHook> presbrk_hooks_;
extern HookList<MallocHook::SbrkHook> sbrk_hooks_;

inline void MallocHook::InvokeNewHook(const void* ptr, size_t size) {
  if (!absl::base_internal::new_hooks_.empty()) {
    InvokeNewHookSlow(ptr, size);
  }
}

inline void MallocHook::InvokeDeleteHook(const void* ptr) {
  if (!absl::base_internal::delete_hooks_.empty()) {
    InvokeDeleteHookSlow(ptr);
  }
}

inline void MallocHook::InvokeSampledNewHook(
    const SampledAlloc* sampled_alloc) {
  if (!absl::base_internal::sampled_new_hooks_.empty()) {
    InvokeSampledNewHookSlow(sampled_alloc);
  }
}

inline void MallocHook::InvokeSampledDeleteHook(AllocHandle handle) {
  if (!absl::base_internal::sampled_delete_hooks_.empty()) {
    InvokeSampledDeleteHookSlow(handle);
  }
}

inline void MallocHook::InvokePreMmapHook(const void* start,
                                          size_t size,
                                          int protection,
                                          int flags,
                                          int fd,
                                          off_t offset) {
  if (!absl::base_internal::premmap_hooks_.empty()) {
    InvokePreMmapHookSlow(start, size, protection, flags, fd, offset);
  }
}

inline void MallocHook::InvokeMmapHook(const void* result,
                                       const void* start,
                                       size_t size,
                                       int protection,
                                       int flags,
                                       int fd,
                                       off_t offset) {
  if (!absl::base_internal::mmap_hooks_.empty()) {
    InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset);
  }
}

inline bool MallocHook::InvokeMmapReplacement(const void* start,
                                              size_t size,
                                              int protection,
                                              int flags,
                                              int fd,
                                              off_t offset,
                                              void** result) {
  if (!absl::base_internal::mmap_replacement_.empty()) {
    return InvokeMmapReplacementSlow(start, size,
                                     protection, flags,
                                     fd, offset,
                                     result);
  }
  return false;
}

inline void MallocHook::InvokeMunmapHook(const void* start, size_t size) {
  if (!absl::base_internal::munmap_hooks_.empty()) {
    InvokeMunmapHookSlow(start, size);
  }
}

inline bool MallocHook::InvokeMunmapReplacement(
    const void* start, size_t size, int* result) {
  if (!absl::base_internal::mmap_replacement_.empty()) {
    return InvokeMunmapReplacementSlow(start, size, result);
  }
  return false;
}

inline void MallocHook::InvokeMremapHook(const void* result,
                                         const void* old_addr,
                                         size_t old_size,
                                         size_t new_size,
                                         int flags,
                                         const void* new_addr) {
  if (!absl::base_internal::mremap_hooks_.empty()) {
    InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags, new_addr);
  }
}

inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) {
  if (!absl::base_internal::presbrk_hooks_.empty() && increment != 0) {
    InvokePreSbrkHookSlow(increment);
  }
}

inline void MallocHook::InvokeSbrkHook(const void* result,
                                       ptrdiff_t increment) {
  if (!absl::base_internal::sbrk_hooks_.empty() && increment != 0) {
    InvokeSbrkHookSlow(result, increment);
  }
}

}  // namespace base_internal
}  // namespace absl
#endif  // ABSL_BASE_INTERNAL_MALLOC_HOOK_INVOKE_H_
absl/base/internal/malloc_hook_mmap_linux.inc (deleted)
@ -1,168 +0,0 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// We define mmap() and mmap64(), which somewhat reimplements libc's mmap
// syscall stubs. Unfortunately libc only exports the stubs via weak symbols
// (which we're overriding with our mmap64() and mmap() wrappers) so we can't
// just call through to them.

#ifndef __linux__
# error Should only be including malloc_hook_mmap_linux.h on linux systems.
#endif

#include <sys/mman.h>
#include <sys/types.h>
#ifdef __BIONIC__
#include <sys/syscall.h>
#else
#include <syscall.h>
#endif

#include <linux/unistd.h>
#include <unistd.h>
#include <cerrno>
#include <cstdarg>
#include <cstdint>

#include "absl/base/internal/direct_mmap.h"

// SYS_mremap is not defined in Android.
#ifdef __BIONIC__
#ifndef SYS_mremap
#define SYS_mremap __NR_mremap
#endif
#endif  // __BIONIC__

// We put MallocHook::InvokeMmapHook calls right into mmap and mmap64, so that
// the stack frames in the caller's stack are at the same offsets for all the
// calls of memory allocating functions.

// Put all callers of MallocHook::Invoke* in this module into
// malloc_hook section,
// so that MallocHook::GetCallerStackTrace can function accurately:

// Make sure mmap doesn't get #define'd away by <sys/mman.h>
# undef mmap

extern "C" {
ABSL_ATTRIBUTE_SECTION(malloc_hook)
void* mmap64(void* start, size_t length, int prot, int flags, int fd,
             off64_t offset) __THROW;
ABSL_ATTRIBUTE_SECTION(malloc_hook)
void* mmap(void* start, size_t length, int prot, int flags, int fd,
           off_t offset) __THROW;
ABSL_ATTRIBUTE_SECTION(malloc_hook)
int munmap(void* start, size_t length) __THROW;
ABSL_ATTRIBUTE_SECTION(malloc_hook)
void* mremap(void* old_addr, size_t old_size, size_t new_size, int flags,
             ...) __THROW;
ABSL_ATTRIBUTE_SECTION(malloc_hook) void* sbrk(ptrdiff_t increment) __THROW;
}

extern "C" void* mmap64(void *start, size_t length, int prot, int flags,
                        int fd, off64_t offset) __THROW {
  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
                                                     fd, offset);
  void *result;
  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
          start, length, prot, flags, fd, offset, &result)) {
    result = absl::base_internal::DirectMmap(start, length, prot, flags, fd,
                                             offset);
  }
  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
                                                  flags, fd, offset);
  return result;
}

# if !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)

extern "C" void* mmap(void *start, size_t length, int prot, int flags,
                      int fd, off_t offset) __THROW {
  absl::base_internal::MallocHook::InvokePreMmapHook(start, length, prot, flags,
                                                     fd, offset);
  void *result;
  if (!absl::base_internal::MallocHook::InvokeMmapReplacement(
          start, length, prot, flags, fd, offset, &result)) {
    result = absl::base_internal::DirectMmap(
        start, length, prot, flags, fd,
        static_cast<size_t>(offset));  // avoid sign extension
  }
  absl::base_internal::MallocHook::InvokeMmapHook(result, start, length, prot,
                                                  flags, fd, offset);
  return result;
}

# endif  // !defined(__USE_FILE_OFFSET64) || !defined(__REDIRECT_NTH)

extern "C" int munmap(void* start, size_t length) __THROW {
  absl::base_internal::MallocHook::InvokeMunmapHook(start, length);
  int result;
  if (!absl::base_internal::MallocHook::InvokeMunmapReplacement(start, length,
                                                                &result)) {
    result = absl::base_internal::DirectMunmap(start, length);
  }
  return result;
}

extern "C" void* mremap(void* old_addr, size_t old_size, size_t new_size,
                        int flags, ...) __THROW {
  va_list ap;
  va_start(ap, flags);
  void *new_address = va_arg(ap, void *);
  va_end(ap);
  void* result = reinterpret_cast<void*>(
      syscall(SYS_mremap, old_addr, old_size, new_size, flags, new_address));
  absl::base_internal::MallocHook::InvokeMremapHook(
      result, old_addr, old_size, new_size, flags, new_address);
  return result;
}

// sbrk cannot be intercepted on Android as there is no mechanism to
// invoke the original sbrk (since there is no __sbrk as with glibc).
#if !defined(__BIONIC__)
// libc's version:
extern "C" void* __sbrk(ptrdiff_t increment);

extern "C" void* sbrk(ptrdiff_t increment) __THROW {
  absl::base_internal::MallocHook::InvokePreSbrkHook(increment);
  void *result = __sbrk(increment);
  absl::base_internal::MallocHook::InvokeSbrkHook(result, increment);
  return result;
}
#endif  // !defined(__BIONIC__)

namespace absl {
namespace base_internal {

/*static*/void* MallocHook::UnhookedMMap(void *start, size_t length, int prot,
                                         int flags, int fd, off_t offset) {
  void* result;
  if (!MallocHook::InvokeMmapReplacement(
          start, length, prot, flags, fd, offset, &result)) {
    result = absl::base_internal::DirectMmap(start, length, prot, flags, fd,
                                             offset);
  }
  return result;
}

/*static*/int MallocHook::UnhookedMUnmap(void *start, size_t length) {
  int result;
  if (!MallocHook::InvokeMunmapReplacement(start, length, &result)) {
    result = absl::base_internal::DirectMunmap(start, length);
  }
  return result;
}

}  // namespace base_internal
}  // namespace absl
@ -207,7 +207,7 @@ class FixedArray {
// Overload of FixedArray::at() to return a const reference to the ith element
// of the fixed array.
const_reference at(size_type i) const {
  if (i >= size()) {
  if (ABSL_PREDICT_FALSE(i >= size())) {
    base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
  }
  return data()[i];
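This hunk wraps the failing bounds check in ABSL_PREDICT_FALSE so the compiler keeps the successful lookup on the hot path. A small sketch of the same pattern outside FixedArray (the helper below is hypothetical, only the macro comes from Abseil):

    #include "absl/base/optimization.h"

    int checked_get(const int* data, size_t size, size_t i) {
      if (ABSL_PREDICT_FALSE(i >= size)) {
        return -1;  // error path, kept off the hot path
      }
      return data[i];
    }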

@ -651,7 +651,7 @@ TEST(IntVec, AliasingCopyAssignment) {
IntVec original;
Fill(&original, len);
IntVec dup = original;
dup = dup;
dup = *&dup;
EXPECT_EQ(dup, original);
}
}
@ -75,6 +75,24 @@ cc_test(
    ],
)

cc_library(
    name = "examine_stack",
    srcs = [
        "internal/examine_stack.cc",
    ],
    hdrs = [
        "internal/examine_stack.h",
    ],
    copts = ABSL_DEFAULT_COPTS,
    visibility = ["//visibility:private"],
    deps = [
        ":stacktrace",
        ":symbolize",
        "//absl/base",
        "//absl/base:core_headers",
    ],
)

cc_library(
    name = "debugging_internal",
    srcs = [
@ -234,11 +252,11 @@ cc_library(
    srcs = ["internal/stack_consumption.cc"],
    hdrs = ["internal/stack_consumption.h"],
    copts = ABSL_DEFAULT_COPTS,
    visibility = ["//visibility:private"],
    deps = [
        "//absl/base",
        "//absl/base:core_headers",
    ],
    visibility = ["//visibility:private"],
)

cc_test(
@ -25,6 +25,7 @@ list(APPEND DEBUGGING_INTERNAL_HEADERS
  "internal/address_is_readable.h"
  "internal/demangle.h"
  "internal/elf_mem_image.h"
  "internal/examine_stack.h"
  "internal/stacktrace_config.h"
  "internal/symbolize.h"
  "internal/vdso_support.h"

@ -49,6 +50,12 @@ list(APPEND SYMBOLIZE_SRC
  ${DEBUGGING_INTERNAL_HEADERS}
)

list(APPEND EXAMINE_STACK_SRC
  "internal/examine_stack.cc"
  ${DEBUGGING_PUBLIC_HEADERS}
  ${DEBUGGING_INTERNAL_HEADERS}
)

absl_library(
  TARGET
    absl_stacktrace

@ -67,6 +74,17 @@ absl_library(
    symbolize
)

# Internal-only. Projects external to Abseil should not depend
# directly on this library.
absl_library(
  TARGET
    absl_examine_stack
  SOURCES
    ${EXAMINE_STACK_SRC}
  EXPORT_NAME
    examine_stack
)

list(APPEND LEAK_CHECK_SRC
  "leak_check.cc"
)
absl/debugging/internal/examine_stack.cc (new file, 147 lines)
@ -0,0 +1,147 @@
//
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "absl/debugging/internal/examine_stack.h"

#ifndef _WIN32
#include <unistd.h>
#endif

#include <csignal>
#include <cstdio>

#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

namespace absl {
namespace debugging_internal {

// Returns the program counter from signal context, nullptr if
// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
void* GetProgramCounter(void* vuc) {
#ifdef __linux__
  if (vuc != nullptr) {
    ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
#if defined(__aarch64__)
    return reinterpret_cast<void*>(context->uc_mcontext.pc);
#elif defined(__arm__)
    return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
#elif defined(__i386__)
    if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
      return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
#elif defined(__powerpc64__)
    return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
#elif defined(__powerpc__)
    return reinterpret_cast<void*>(context->uc_mcontext.regs->nip);
#elif defined(__x86_64__)
    if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
      return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
#else
#error "Undefined Architecture."
#endif
  }
#elif defined(__akaros__)
  auto* ctx = reinterpret_cast<struct user_context*>(vuc);
  return reinterpret_cast<void*>(get_user_ctx_pc(ctx));
#endif
  static_cast<void>(vuc);
  return nullptr;
}
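The intended call site for GetProgramCounter is a SA_SIGINFO signal handler, which receives the ucontext as its third argument. A hedged sketch (handler name and setup are illustrative; fprintf is not async-signal-safe and stands in for a real raw-logging call):

    #include <signal.h>
    #include <cstdio>

    extern "C" void CrashHandler(int signo, siginfo_t*, void* ucontext) {
      void* pc = absl::debugging_internal::GetProgramCounter(ucontext);
      std::fprintf(stderr, "signal %d at pc %p\n", signo, pc);  // illustration only
    }

    void InstallCrashHandler() {
      struct sigaction act = {};
      act.sa_sigaction = &CrashHandler;
      act.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &act, nullptr);
    }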

// The %p field width for printf() functions is two characters per byte,
// and two extra for the leading "0x".
static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);

// Print a program counter, its stack frame size, and its symbol name.
// Note that there is a separate symbolize_pc argument. Return addresses may be
// at the end of the function, and this allows the caller to back up from pc if
// appropriate.
static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
                                        void* writerfn_arg, void* pc,
                                        void* symbolize_pc, int framesize,
                                        const char* const prefix) {
  char tmp[1024];
  const char* symbol = "(unknown)";
  if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
    symbol = tmp;
  }
  char buf[1024];
  if (framesize <= 0) {
    snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
             kPrintfPointerFieldWidth, pc, symbol);
  } else {
    snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
             kPrintfPointerFieldWidth, pc, framesize, symbol);
  }
  writerfn(buf, writerfn_arg);
}

// Print a program counter and the corresponding stack frame size.
static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
                               void* writerfn_arg, void* pc, int framesize,
                               const char* const prefix) {
  char buf[100];
  if (framesize <= 0) {
    snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
             kPrintfPointerFieldWidth, pc);
  } else {
    snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
             kPrintfPointerFieldWidth, pc, framesize);
  }
  writerfn(buf, writerfn_arg);
}

void DumpPCAndFrameSizesAndStackTrace(
    void* pc, void* const stack[], int frame_sizes[], int depth,
    int min_dropped_frames, bool symbolize_stacktrace,
    void (*writerfn)(const char*, void*), void* writerfn_arg) {
  if (pc != nullptr) {
    // We don't know the stack frame size for PC, use 0.
    if (symbolize_stacktrace) {
      DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
    } else {
      DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
    }
  }
  for (int i = 0; i < depth; i++) {
    if (symbolize_stacktrace) {
      // Pass the previous address of pc as the symbol address because pc is a
      // return address, and an overrun may occur when the function ends with a
      // call to a function annotated noreturn (e.g. CHECK). Note that we don't
      // do this for pc above, as the adjustment is only correct for return
      // addresses.
      DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
                                  reinterpret_cast<char*>(stack[i]) - 1,
                                  frame_sizes[i], "    ");
    } else {
      DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
                         "    ");
    }
  }
  if (min_dropped_frames > 0) {
    char buf[100];
    snprintf(buf, sizeof(buf), "    @ ... and at least %d more frames\n",
             min_dropped_frames);
    writerfn(buf, writerfn_arg);
  }
}

}  // namespace debugging_internal
}  // namespace absl
absl/debugging/internal/examine_stack.h (new file, 38 lines)
@ -0,0 +1,38 @@
//
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_

namespace absl {
namespace debugging_internal {

// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
void* GetProgramCounter(void* vuc);

// Uses `writerfn` to dump the program counter, stack trace, and stack
// frame sizes.
void DumpPCAndFrameSizesAndStackTrace(
    void* pc, void* const stack[], int frame_sizes[], int depth,
    int min_dropped_frames, bool symbolize_stacktrace,
    void (*writerfn)(const char*, void*), void* writerfn_arg);

}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
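Putting the two new functions together, a hedged usage sketch: gather a stack with frame sizes, then dump it through a stderr writer. The wrapper names are illustrative; GetStackFramesWithContext comes from absl/debugging/stacktrace.h.

    #include <cstdio>
    #include "absl/debugging/internal/examine_stack.h"
    #include "absl/debugging/stacktrace.h"

    static void WriteToStderr(const char* data, void* /*unused*/) {
      std::fputs(data, stderr);
    }

    void DumpCurrentStack(void* pc, void* ucontext) {
      void* stack[64];
      int sizes[64];
      int min_dropped = 0;
      int depth = absl::GetStackFramesWithContext(stack, sizes, 64,
                                                  /*skip_count=*/1, ucontext,
                                                  &min_dropped);
      absl::debugging_internal::DumpPCAndFrameSizesAndStackTrace(
          pc, stack, sizes, depth, min_dropped, /*symbolize_stacktrace=*/true,
          WriteToStderr, nullptr);
    }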

@ -168,13 +168,13 @@ class string_view {
  string_view(  // NOLINT(runtime/explicit)
      const std::basic_string<char, std::char_traits<char>, Allocator>&
          str) noexcept
      : ptr_(str.data()), length_(str.size()) {}
      : ptr_(str.data()), length_(CheckLengthInternal(str.size())) {}

  // Implicit constructor of a `string_view` from nul-terminated `str`. When
  // accepting possibly null strings, use `absl::NullSafeStringView(str)`
  // instead (see below).
  constexpr string_view(const char* str)  // NOLINT(runtime/explicit)
      : ptr_(str), length_(StrLenInternal(str)) {}
      : ptr_(str), length_(CheckLengthInternal(StrLenInternal(str))) {}

  // Implicit constructor of a `string_view` from a `const char*` and length.
  constexpr string_view(const char* data, size_type len)
@ -479,7 +479,7 @@ class string_view {

 private:
  static constexpr size_type kMaxSize =
      std::numeric_limits<size_type>::max() / 2 + 1;
      std::numeric_limits<difference_type>::max();

  // check whether __builtin_strlen is provided by the compiler.
  // GCC doesn't have __has_builtin()
@ -1068,6 +1068,17 @@ TEST(HugeStringView, TwoPointTwoGB) {
TEST(NonNegativeLenTest, NonNegativeLen) {
  EXPECT_DEATH_IF_SUPPORTED(absl::string_view("xyz", -1), "len <= kMaxSize");
}

TEST(LenExceedsMaxSizeTest, LenExceedsMaxSize) {
  auto max_size = absl::string_view().max_size();

  // This should construct ok (although the view itself is obviously invalid).
  absl::string_view ok_view("", max_size);

  // Adding one to the max should trigger an assertion.
  EXPECT_DEATH_IF_SUPPORTED(absl::string_view("", max_size + 1),
                            "len <= kMaxSize");
}
#endif  // !defined(NDEBUG) && !defined(ABSL_HAVE_STD_STRING_VIEW)

class StringViewStreamTest : public ::testing::Test {
@ -75,7 +75,6 @@ cc_library(
        "//absl/base:config",
        "//absl/base:core_headers",
        "//absl/base:dynamic_annotations",
        "//absl/base:malloc_extension",
        "//absl/base:malloc_internal",
        "//absl/debugging:stacktrace",
        "//absl/time",

@ -168,7 +167,6 @@ cc_library(
    deps = [
        ":synchronization",
        "//absl/base",
        "//absl/base:malloc_extension",
        "//absl/strings",
        "//absl/time",
        "@com_google_googletest//:gtest",

@ -184,7 +182,6 @@ cc_test(
        ":per_thread_sem_test_common",
        ":synchronization",
        "//absl/base",
        "//absl/base:malloc_extension",
        "//absl/strings",
        "//absl/time",
        "@com_google_googletest//:gtest_main",
@ -44,7 +44,7 @@ list(APPEND SYNCHRONIZATION_SRC
  "notification.cc"
  "mutex.cc"
)
set(SYNCHRONIZATION_PUBLIC_LIBRARIES absl::base absl_malloc_extension absl::time)
set(SYNCHRONIZATION_PUBLIC_LIBRARIES absl::base absl::time)

absl_library(
  TARGET
@ -21,7 +21,6 @@
#include <atomic>

#include "absl/base/attributes.h"
#include "absl/base/internal/malloc_extension.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/waiter.h"
@ -90,12 +89,6 @@ ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait(
  if (identity->blocked_count_ptr != nullptr) {
    identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
  }

  if (identity->is_idle.load(std::memory_order_relaxed)) {
    // We became idle during the wait; become non-idle again so that
    // performance of deallocations done from now on does not suffer.
    absl::base_internal::MallocExtension::instance()->MarkThreadBusy();
  }
  identity->is_idle.store(false, std::memory_order_relaxed);
  identity->wait_start.store(0, std::memory_order_relaxed);
  return !timeout;
@ -24,7 +24,6 @@

#include "gtest/gtest.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/malloc_extension.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
@ -167,79 +166,6 @@ TEST_F(PerThreadSemTest, Timeouts) {
  EXPECT_TRUE(Wait(negative_timeout));
}

// Test that idle threads properly register themselves as such with malloc.
TEST_F(PerThreadSemTest, Idle) {
  // We can't use gmock because it might use synch calls. So we do it
  // by hand, messily. I don't bother hitting every one of the
  // MallocExtension calls because most of them won't get made
  // anyway--if they do we can add them.
  class MockMallocExtension : public base_internal::MallocExtension {
   public:
    MockMallocExtension(base_internal::MallocExtension *real,
                        base_internal::ThreadIdentity *id,
                        std::atomic<int> *idles, std::atomic<int> *busies)
        : real_(real), id_(id), idles_(idles), busies_(busies) {}
    void MarkThreadIdle() override {
      if (base_internal::CurrentThreadIdentityIfPresent() != id_) {
        return;
      }
      idles_->fetch_add(1, std::memory_order_relaxed);
    }

    void MarkThreadBusy() override {
      if (base_internal::CurrentThreadIdentityIfPresent() != id_) {
        return;
      }
      busies_->fetch_add(1, std::memory_order_relaxed);
    }
    size_t GetAllocatedSize(const void* p) override {
      return real_->GetAllocatedSize(p);
    }

   private:
    MallocExtension *real_;
    base_internal::ThreadIdentity *id_;
    std::atomic<int>* idles_;
    std::atomic<int>* busies_;
  };

  base_internal::ThreadIdentity *id = GetOrCreateCurrentThreadIdentity();
  std::atomic<int> idles(0);
  std::atomic<int> busies(0);
  base_internal::MallocExtension *old =
      base_internal::MallocExtension::instance();
  MockMallocExtension mock(old, id, &idles, &busies);
  base_internal::MallocExtension::Register(&mock);
  std::atomic<int> sync(0);

  std::thread t([id, &idles, &sync]() {
    // Wait for the main thread to begin the wait process
    while (0 == sync.load(std::memory_order_relaxed)) {
      SleepFor(absl::Milliseconds(1));
    }
    // Wait for main thread to become idle, then wake it
    // pretend time is passing--enough of these should cause an idling.
    for (int i = 0; i < 100; ++i) {
      Tick(id);
    }
    while (0 == idles.load(std::memory_order_relaxed)) {
      // Keep ticking, just in case.
      Tick(id);
      SleepFor(absl::Milliseconds(1));
    }
    Post(id);
  });

  idles.store(0, std::memory_order_relaxed);  // In case we slept earlier.
  sync.store(1, std::memory_order_relaxed);
  Wait(KernelTimeout::Never());

  // t will wake us once we become idle.
  EXPECT_LT(0, busies.load(std::memory_order_relaxed));
  t.join();
  base_internal::MallocExtension::Register(old);
}

}  // namespace

}  // namespace synchronization_internal
@ -40,8 +40,6 @@
#include <atomic>
#include <cassert>
#include <cstdint>

#include "absl/base/internal/malloc_extension.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/optimization.h"

@ -59,7 +57,6 @@ static void MaybeBecomeIdle() {
  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
  if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
    identity->is_idle.store(true, std::memory_order_relaxed);
    base_internal::MallocExtension::instance()->MarkThreadIdle();
  }
}
@ -174,8 +174,10 @@ inline Duration MakeDurationFromU128(uint128 u128, bool is_neg) {
// name intN_t designates a signed integer type with width N, no padding
// bits, and a two's complement representation." So, we can convert to
// and from the corresponding uint64_t value using a bit cast.
inline uint64_t EncodeTwosComp(int64_t v) { return bit_cast<uint64_t>(v); }
inline int64_t DecodeTwosComp(uint64_t v) { return bit_cast<int64_t>(v); }
inline uint64_t EncodeTwosComp(int64_t v) {
  return absl::bit_cast<uint64_t>(v);
}
inline int64_t DecodeTwosComp(uint64_t v) { return absl::bit_cast<int64_t>(v); }
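The round trip the comment describes can be spelled out; these helpers are local to duration.cc, so the check below is a conceptual sketch rather than something compilable against the public API:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    void TwosCompRoundTripCheck() {
      // -1 encodes to the all-ones bit pattern and decodes back unchanged.
      assert(EncodeTwosComp(int64_t{-1}) == std::numeric_limits<uint64_t>::max());
      assert(DecodeTwosComp(EncodeTwosComp(int64_t{-5})) == int64_t{-5});
    }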

// Note: The overflow detection in this function is done using greater/less *or
// equal* because kint64max/min is too large to be represented exactly in a
@ -21,15 +21,21 @@
#include "absl/base/internal/exception_safety_testing.h"

using Thrower = absl::ThrowingValue<>;
using NoThrowMoveThrower =
    absl::ThrowingValue<absl::NoThrow::kMoveCtor | absl::NoThrow::kMoveAssign>;
using ThrowerList = std::initializer_list<Thrower>;
using ThrowerVec = std::vector<Thrower>;
using ThrowingAlloc = absl::ThrowingAllocator<Thrower>;
using ThrowingThrowerVec = std::vector<Thrower, ThrowingAlloc>;

namespace absl {
namespace {

testing::AssertionResult AbslCheckInvariants(absl::any* a,
                                             InternalAbslNamespaceFinder) {
class AnyExceptionSafety : public ::testing::Test {
 private:
  absl::ConstructorTracker inspector_;
};

testing::AssertionResult AnyInvariants(absl::any* a) {
  using testing::AssertionFailure;
  using testing::AssertionSuccess;

@ -69,17 +75,10 @@ testing::AssertionResult AbslCheckInvariants(absl::any* a,
  return AssertionSuccess();
}

}  // namespace absl

namespace {

class AnyExceptionSafety : public ::testing::Test {
 private:
  absl::ConstructorTracker inspector_;
};

testing::AssertionResult AnyIsEmpty(absl::any* a) {
  if (!a->has_value()) return testing::AssertionSuccess();
  if (!a->has_value()) {
    return testing::AssertionSuccess();
  }
  return testing::AssertionFailure()
         << "a should be empty, but instead has value "
         << absl::any_cast<Thrower>(*a).Get();

@ -100,101 +99,70 @@ TEST_F(AnyExceptionSafety, Ctors) {
      absl::in_place_type_t<ThrowingThrowerVec>(), {val}, ThrowingAlloc());
}

struct OneFactory {
  std::unique_ptr<absl::any> operator()() const {
    return absl::make_unique<absl::any>(absl::in_place_type_t<Thrower>(), 1,
                                        absl::no_throw_ctor);
  }
};

struct EmptyFactory {
  std::unique_ptr<absl::any> operator()() const {
    return absl::make_unique<absl::any>();
  }
};

TEST_F(AnyExceptionSafety, Assignment) {
  auto thrower_comp = [](const absl::any& l, const absl::any& r) {
    return absl::any_cast<Thrower>(l) == absl::any_cast<Thrower>(r);
  auto original =
      absl::any(absl::in_place_type_t<Thrower>(), 1, absl::no_throw_ctor);
  auto any_is_strong = [original](absl::any* ap) {
    return testing::AssertionResult(ap->has_value() &&
                                    absl::any_cast<Thrower>(original) ==
                                        absl::any_cast<Thrower>(*ap));
  };
  auto any_strong_tester = absl::MakeExceptionSafetyTester()
                               .WithInitialValue(original)
                               .WithInvariants(AnyInvariants, any_is_strong);

  OneFactory one_factory;

  absl::ThrowingValue<absl::NoThrow::kMoveCtor | absl::NoThrow::kMoveAssign>
      moveable_val(2);
  Thrower val(2);
  absl::any any_val(val);
  NoThrowMoveThrower mv_val(2);

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory, [&any_val](absl::any* ap) { *ap = any_val; },
      absl::StrongGuarantee(one_factory, thrower_comp)));
  auto assign_any = [&any_val](absl::any* ap) { *ap = any_val; };
  auto assign_val = [&val](absl::any* ap) { *ap = val; };
  auto move = [&val](absl::any* ap) { *ap = std::move(val); };
  auto move_movable = [&mv_val](absl::any* ap) { *ap = std::move(mv_val); };

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory, [&val](absl::any* ap) { *ap = val; },
      absl::StrongGuarantee(one_factory, thrower_comp)));
  EXPECT_TRUE(any_strong_tester.Test(assign_any));
  EXPECT_TRUE(any_strong_tester.Test(assign_val));
  EXPECT_TRUE(any_strong_tester.Test(move));
  EXPECT_TRUE(any_strong_tester.Test(move_movable));

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory, [&val](absl::any* ap) { *ap = std::move(val); },
      absl::StrongGuarantee(one_factory, thrower_comp)));

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory,
      [&moveable_val](absl::any* ap) { *ap = std::move(moveable_val); },
      absl::StrongGuarantee(one_factory, thrower_comp)));

  EmptyFactory empty_factory;
  auto empty_comp = [](const absl::any& l, const absl::any& r) {
    return !(l.has_value() || r.has_value());
  auto empty_any_is_strong = [](absl::any* ap) {
    return testing::AssertionResult{!ap->has_value()};
  };
  auto strong_empty_any_tester =
      absl::MakeExceptionSafetyTester()
          .WithInitialValue(absl::any{})
          .WithInvariants(AnyInvariants, empty_any_is_strong);

  EXPECT_TRUE(absl::TestExceptionSafety(
      empty_factory, [&any_val](absl::any* ap) { *ap = any_val; },
      absl::StrongGuarantee(empty_factory, empty_comp)));

  EXPECT_TRUE(absl::TestExceptionSafety(
      empty_factory, [&val](absl::any* ap) { *ap = val; },
      absl::StrongGuarantee(empty_factory, empty_comp)));

  EXPECT_TRUE(absl::TestExceptionSafety(
      empty_factory, [&val](absl::any* ap) { *ap = std::move(val); },
      absl::StrongGuarantee(empty_factory, empty_comp)));
  EXPECT_TRUE(strong_empty_any_tester.Test(assign_any));
  EXPECT_TRUE(strong_empty_any_tester.Test(assign_val));
  EXPECT_TRUE(strong_empty_any_tester.Test(move));
}
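The refactor above converges on a single pattern: build one exception-safety tester, then hand it each mutating operation. A condensed, hedged sketch of that pattern using names already present in this file (the test name PatternSketch is hypothetical and not part of the real file):

    TEST_F(AnyExceptionSafety, PatternSketch) {
      // 1) construct a tester with an initial value and invariants,
      // 2) run each operation under test through it.
      auto tester = absl::MakeExceptionSafetyTester()
                        .WithInitialValue(absl::any{})
                        .WithInvariants(AnyInvariants, AnyIsEmpty);
      EXPECT_TRUE(tester.Test([](absl::any* a) { a->emplace<Thrower>(2); }));
    }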

// libstdc++ std::any fails this test
#if !defined(ABSL_HAVE_STD_ANY)
TEST_F(AnyExceptionSafety, Emplace) {
  OneFactory one_factory;
  auto initial_val =
      absl::any{absl::in_place_type_t<Thrower>(), 1, absl::no_throw_ctor};
  auto one_tester = absl::MakeExceptionSafetyTester()
                        .WithInitialValue(initial_val)
                        .WithInvariants(AnyInvariants, AnyIsEmpty);

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory, [](absl::any* ap) { ap->emplace<Thrower>(2); }, AnyIsEmpty));
  auto emp_thrower = [](absl::any* ap) { ap->emplace<Thrower>(2); };
  auto emp_throwervec = [](absl::any* ap) {
    std::initializer_list<Thrower> il{Thrower(2, absl::no_throw_ctor)};
    ap->emplace<ThrowerVec>(il);
  };
  auto emp_movethrower = [](absl::any* ap) {
    ap->emplace<NoThrowMoveThrower>(2);
  };

  EXPECT_TRUE(absl::TestExceptionSafety(
      one_factory,
      [](absl::any* ap) {
        ap->emplace<absl::ThrowingValue<absl::NoThrow::kMoveCtor |
                                        absl::NoThrow::kMoveAssign>>(2);
      },
      AnyIsEmpty));
  EXPECT_TRUE(one_tester.Test(emp_thrower));
  EXPECT_TRUE(one_tester.Test(emp_throwervec));
  EXPECT_TRUE(one_tester.Test(emp_movethrower));

  EXPECT_TRUE(absl::TestExceptionSafety(one_factory,
                                        [](absl::any* ap) {
                                          std::initializer_list<Thrower> il{
                                              Thrower(2, absl::no_throw_ctor)};
                                          ap->emplace<ThrowerVec>(il);
                                        },
                                        AnyIsEmpty));
  auto empty_tester = one_tester.WithInitialValue(absl::any{});

  EmptyFactory empty_factory;
  EXPECT_TRUE(absl::TestExceptionSafety(
      empty_factory, [](absl::any* ap) { ap->emplace<Thrower>(2); },
      AnyIsEmpty));

  EXPECT_TRUE(absl::TestExceptionSafety(empty_factory,
                                        [](absl::any* ap) {
                                          std::initializer_list<Thrower> il{
                                              Thrower(2, absl::no_throw_ctor)};
                                          ap->emplace<ThrowerVec>(il);
                                        },
                                        AnyIsEmpty));
  EXPECT_TRUE(empty_tester.Test(emp_thrower));
  EXPECT_TRUE(empty_tester.Test(emp_throwervec));
}
#endif  // ABSL_HAVE_STD_ANY

@ -553,7 +553,7 @@ TEST(AnyTest, Move) {
  absl::any tmp4(4);
  absl::any o4(std::move(tmp4));  // move construct
  EXPECT_EQ(4, absl::any_cast<int>(o4));
  o4 = o4;  // self assign
  o4 = *&o4;  // self assign
  EXPECT_EQ(4, absl::any_cast<int>(o4));
  EXPECT_TRUE(o4.has_value());
@ -216,7 +216,7 @@ using EnableIfConvertibleToSpanConst =
|
|||
// // Construct a Span implicitly from a container
|
||||
// void MyRoutine(absl::Span<const int> a) {
|
||||
// ...
|
||||
// };
|
||||
// }
|
||||
// std::vector v = {1,2,3,4,5};
|
||||
// MyRoutine(v) // convert to Span<const T>
|
||||
//
|
||||
|
@ -235,7 +235,7 @@ using EnableIfConvertibleToSpanConst =
|
|||
//
|
||||
// void MyRoutine(absl::Span<const int> a) {
|
||||
// ...
|
||||
// };
|
||||
// }
|
||||
//
|
||||
// std::vector v = {1,2,3,4,5};
|
||||
// MyRoutine(v);
|
||||
|
|