bf29470384

-- bdce7e57e9e886eff1114d0266781b443f7ec639 by Derek Mauro <dmauro@google.com>:
Change {Get|Set}EnvironmentVariable to {Get|Set}EnvironmentVariableA for compatibility with /DUNICODE.
PiperOrigin-RevId: 239229514

-- 2276ed502326a044a84060d34eb19d499e3a3be2 by Derek Mauro <dmauro@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 239228622

-- a462efb970ff43b08a362ef2343fb75ac1295a50 by Derek Mauro <dmauro@google.com>:
Add linking of CoreFoundation to CMakeLists in absl/time. Import https://github.com/abseil/abseil-cpp/pull/280. Fix #283
PiperOrigin-RevId: 239220785

-- fc23327b97f940c682aae1956cf7a1bf87f88c06 by Derek Mauro <dmauro@google.com>:
Add a hermetic test script that uses Docker to build with a very recent version of gcc (8.3.0 today) with libstdc++ and bazel.
PiperOrigin-RevId: 239220448

-- 418c08a8f6a53e63b84e39473035774417ca3aa7 by Derek Mauro <dmauro@google.com>:
Disable part of the variant exception-safety test on move assignment when using versions of libstdc++ that contain a bug. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87431#c7
PiperOrigin-RevId: 239062455

-- 799722217aeda79679577843c91d5be62cbcbb42 by Matt Calabrese <calabrese@google.com>:
Add internal-only IsSwappable traits corresponding to std::is_swappable and std::is_nothrow_swappable, which are used with the swap implementations of optional and variant.
PiperOrigin-RevId: 239049448

-- aa46a036038a3de5c68ac5e5d3b4bf76f818d2ea by CJ Johnson <johnsoncj@google.com>:
Make the InlinedVectorStorage constructor explicit.
PiperOrigin-RevId: 239044361

-- 17949715b3aa21c794701f69f2154e91b6acabc3 by CJ Johnson <johnsoncj@google.com>:
Add the absl namespace to internal/inlined_vector.h.
PiperOrigin-RevId: 239030789

-- 834628325953078cc08ed10d23bb8890e5bec897 by Derek Mauro <dmauro@google.com>:
Add a test script that uses Docker to build Abseil with gcc-4.8, libstdc++, and CMake.
PiperOrigin-RevId: 239028433

-- 80fe24149ed73ed2ced995ad1e372fb060c60427 by CJ Johnson <johnsoncj@google.com>:
Factor the data members of InlinedVector into an impl type called InlinedVectorStorage so that (in future changes) the contents of a vector can be grouped together with a single pointer.
PiperOrigin-RevId: 239021086

-- 585331436d5d4d79f845e45dcf79d918a0dc6169 by Derek Mauro <dmauro@google.com>:
Add -Wno-missing-field-initializers to gcc compiler flags. gcc-4.x has spurious missing-field-initializer warnings. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750
PiperOrigin-RevId: 239017217

-- 94602fe4e33ee3a552a7f2939c0f57a992f55075 by Abseil Team <absl-team@google.com>:
Formatting fixes.
PiperOrigin-RevId: 238983038

-- a1c1b63c08505574e0a8c491561840cecb2bb93e by Derek Mauro <dmauro@google.com>:
Add a hermetic test script that uses Docker to build with a very recent version of clang with libc++ and bazel.
PiperOrigin-RevId: 238669118

-- e525f8d20bc2f79a0d69336b902f63858f3bff9d by Derek Mauro <dmauro@google.com>:
Disable the test optionalTest.InPlaceTSFINAEBug until libc++ is updated.
PiperOrigin-RevId: 238661703

-- f99a2a0b5ec424a059678f7f226600f137b4c74e by Derek Mauro <dmauro@google.com>:
Correct the check for the FlatHashMap-Any test bug (list conditions instead of platforms when possible).
PiperOrigin-RevId: 238653344

-- 777928035dbcbf39f361eb7d10dc3696822f692f by Jon Cohen <cohenjon@google.com>:
Add install rules for Abseil CMake. These are intended to be limited to in-project installation. This serves two purposes: first, it's morally the same as using Abseil in-source, except you don't have to rebuild it every time. Second, the presence of an install rule makes life much simpler for package-manager maintainers. Currently this doesn't install absl tests or test-only libraries; that can be added in a follow-up patch. Fixes #38, Fixes #80, Closes #182
PiperOrigin-RevId: 238645836

-- ded1c6ce697c191b7a6ff14572b3e6d183117b2c by Derek Mauro <dmauro@google.com>:
Add a hermetic test script that uses Docker to build with a very recent version of clang with libstdc++ and bazel.
PiperOrigin-RevId: 238517815

GitOrigin-RevId: bdce7e57e9e886eff1114d0266781b443f7ec639
Change-Id: I6f745869cb8ef63851891ccac05ae9a7dd241c4f
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Produce stack trace

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_

#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
#include <ucontext.h>  // for ucontext_t
#endif

#if !defined(_WIN32)
#include <unistd.h>
#endif

#include <cassert>
#include <cstdint>

#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

#include "absl/base/internal/raw_logging.h"

#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
// preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
// to analyze before giving up. Up to kMaxBytes+1 bytes of
// instructions could be accessed.
//
// Here are known __kernel_vsyscall instruction sequences:
//
// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
// Used on Intel.
//  0xffffe400 <__kernel_vsyscall+0>:       push   %ecx
//  0xffffe401 <__kernel_vsyscall+1>:       push   %edx
//  0xffffe402 <__kernel_vsyscall+2>:       push   %ebp
//  0xffffe403 <__kernel_vsyscall+3>:       mov    %esp,%ebp
//  0xffffe405 <__kernel_vsyscall+5>:       sysenter
//
// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
// Used on AMD.
//  0xffffe400 <__kernel_vsyscall+0>:       push   %ebp
//  0xffffe401 <__kernel_vsyscall+1>:       mov    %ecx,%ebp
//  0xffffe403 <__kernel_vsyscall+3>:       syscall
//

// The sequence below isn't actually expected in Google fleet,
// here only for completeness. Remove this comment from OSS release.

// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
//  0xffffe400 <__kernel_vsyscall+0>:       int $0x80
//  0xffffe401 <__kernel_vsyscall+1>:       ret
//
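// For example, scanning the SYSENTER sequence above with
// CountPushInstructions() (below): three "push %reg" bytes are counted, but
// the "mov %esp,%ebp" (0x89 0xE5) that follows shows that this VDSO sets up
// a frame pointer, so the count reported is 0.  Scanning the AMD SYSCALL
// sequence reports 1: a single "push %ebp" precedes "syscall" (0x0F 0x05).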
static const int kMaxBytes = 10;

// We use assert()s instead of DCHECK()s -- this is too low level
// for DCHECK().

static int CountPushInstructions(const unsigned char *const addr) {
  int result = 0;
  for (int i = 0; i < kMaxBytes; ++i) {
    if (addr[i] == 0x89) {
      // "mov reg,reg"
      if (addr[i + 1] == 0xE5) {
        // Found "mov %esp,%ebp".
        return 0;
      }
      ++i;  // Skip register encoding byte.
    } else if (addr[i] == 0x0F &&
               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
      // Found "sysenter" or "syscall".
      return result;
    } else if ((addr[i] & 0xF0) == 0x50) {
      // Found "push %reg".
      ++result;
    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
      // Found "int $0x80"
      assert(result == 0);
      return 0;
    } else {
      // Unexpected instruction.
      assert(false && "unexpected instruction in __kernel_vsyscall");
      return 0;
    }
  }
  // Unexpected: didn't find SYSENTER or SYSCALL in
  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
  return 0;
}
#endif

// Assume stack frames larger than 100,000 bytes are bogus.
static const int kMaxFrameBytes = 100000;

// Returns the stack frame pointer from signal context, 0 if unknown.
// vuc is a ucontext_t *.  We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
#if !defined(__linux__)
  static_cast<void>(vuc);  // Avoid an unused argument compiler warning.
#else
  if (vuc != nullptr) {
    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
#elif defined(__x86_64__)
    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
#else
    const uintptr_t bp = 0;
    const uintptr_t sp = 0;
#endif
    // Sanity-check that the base pointer is valid.  It should be, as long as
    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code
    // in the process is compiled with --copt=-fomit-frame-pointer or
    // --copt=-momit-leaf-frame-pointer.
    //
    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
    // behavior when building with clang.  Talk to the C++ toolchain team about
    // fixing that.
    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;

    // If bp isn't a plausible frame pointer, return the stack pointer instead.
    // If we're lucky, it points to the start of a stack frame; otherwise, we'll
    // get one frame of garbage in the stack trace and fail the sanity check on
    // the next iteration.
    return sp;
  }
#endif
  return 0;
}
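// NextStackFrame() below compares its candidate frame pointer against
// GetFP(uc); when the two match, the strict distance checks are skipped so
// that unwinding can step from an alternate signal stack back onto the
// thread's normal stack.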

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_fp, const void *uc) {
  void **new_fp = (void **)*old_fp;

#if defined(__linux__) && defined(__i386__)
  if (WITH_CONTEXT && uc != nullptr) {
    // How many "push %reg" instructions are there at __kernel_vsyscall?
    // This is constant for a given kernel and processor, so compute
    // it only once.
    static int num_push_instructions = -1;  // Sentinel: not computed yet.
    // Initialize with sentinel value: __kernel_rt_sigreturn can not possibly
    // be there.
    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
    static const unsigned char *kernel_vsyscall_address = nullptr;
    if (num_push_instructions == -1) {
      absl::debugging_internal::VDSOSupport vdso;
      if (vdso.IsPresent()) {
        absl::debugging_internal::VDSOSupport::SymbolInfo
            rt_sigreturn_symbol_info;
        absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
                               &rt_sigreturn_symbol_info) ||
            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
                               &vsyscall_symbol_info) ||
            rt_sigreturn_symbol_info.address == nullptr ||
            vsyscall_symbol_info.address == nullptr) {
          // Unexpected: 32-bit VDSO is present, yet one of the expected
          // symbols is missing or null.
          assert(false && "VDSO is present, but doesn't have expected symbols");
          num_push_instructions = 0;
        } else {
          kernel_rt_sigreturn_address =
              reinterpret_cast<const unsigned char *>(
                  rt_sigreturn_symbol_info.address);
          kernel_vsyscall_address =
              reinterpret_cast<const unsigned char *>(
                  vsyscall_symbol_info.address);
          num_push_instructions =
              CountPushInstructions(kernel_vsyscall_address);
        }
      } else {
        num_push_instructions = 0;
      }
    }
    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
        old_fp[1] == kernel_rt_sigreturn_address) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // This kernel does not use frame pointer in its VDSO code,
      // and so %ebp is not suitable for unwinding.
      void **const reg_ebp =
          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
      const unsigned char *const reg_eip =
          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
          reg_eip - kernel_vsyscall_address < kMaxBytes) {
        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
        // Restore from 'ucv' instead.
        void **const reg_esp =
            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
        // Check that alleged %esp is not null and is reasonably aligned.
        if (reg_esp &&
            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
          // Check that alleged %esp is actually readable. This is to prevent
          // "double fault" in case we hit the first fault due to e.g. stack
          // corruption.
          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
          if (absl::debugging_internal::AddressIsReadable(reg_esp2)) {
            // Alleged %esp is readable, use it for further unwinding.
            new_fp = reinterpret_cast<void **>(reg_esp2);
          }
        }
      }
    }
  }
#endif

  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);

  // Check that the transition from frame pointer old_fp to frame
  // pointer new_fp isn't clearly bogus.  Skip the checks if new_fp
  // matches the signal context, so that we don't skip out early when
  // using an alternate signal stack.
  //
  // TODO(bcmills): The GetFP call should be completely unnecessary when
  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
  // stack by this point), but it is empirically still needed (e.g. when the
  // stack includes a call to abort).  unw_get_reg returns UNW_EBADREG for some
  // frames.  Figure out why GetValidFrameAddr and/or libunwind isn't doing what
  // it's supposed to.
  if (STRICT_UNWINDING &&
      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
    // With the stack growing downwards, older stack frame must be
    // at a greater address than the current one.
    if (new_fp_u <= old_fp_u) return nullptr;
    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
  } else {
    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_fp == old_fp) return nullptr;
  }

  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
#ifdef __i386__
  // On 32-bit machines, the stack pointer can be very close to
  // 0xffffffff, so we explicitly check for a pointer into the
  // last two pages in the address space
  if (new_fp_u >= 0xffffe000) return nullptr;
#endif
#if !defined(_WIN32)
  if (!STRICT_UNWINDING) {
    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
    // on AMD-based machines with VDSO-enabled kernels.
    // Make an extra sanity check to ensure new_fp is readable.
    // Note: NextStackFrame<false>() is only called while the program
    // is already on its last leg, so it's ok to be slow here.

    if (!absl::debugging_internal::AddressIsReadable(new_fp)) {
      return nullptr;
    }
  }
#endif
  return new_fp;
}
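// NextStackFrame() is instantiated below as
// NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>: strict unwinding is used
// for plain stack traces, and the lax variant only when per-frame sizes are
// also being computed.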

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
ABSL_ATTRIBUTE_NOINLINE
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  int n = 0;
  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));

  while (fp && n < max_depth) {
    if (*(fp + 1) == reinterpret_cast<void *>(0)) {
      // In 64-bit code, we often see a frame that
      // points to itself and has a return address of 0.
      break;
    }
    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = *(fp + 1);
      if (IS_STACK_FRAMES) {
        if (next_fp > fp) {
          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
        } else {
          // A frame-size of 0 is used to indicate unknown frame size.
          sizes[n] = 0;
        }
      }
      n++;
    }
    fp = next_fp;
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 1000;
    int j = 0;
    for (; fp != nullptr && j < kMaxUnwind; j++) {
      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    }
    *min_dropped_frames = j;
  }
  return n;
}
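// Usage sketch (illustrative, not part of this file): callers reach
// UnwindImpl above through the public wrappers declared in
// absl/debugging/stacktrace.h, e.g.
//
//   void *frames[64];
//   int depth = absl::GetStackTrace(frames, 64, /*skip_count=*/1);
//   for (int i = 0; i < depth; ++i) {
//     // frames[i] is a return address; absl::Symbolize() from
//     // absl/debugging/symbolize.h can map it to a symbol name.
//   }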

namespace absl {
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_