Add 'third_party/abseil_cpp/' from commit '768eb2ca28'

git-subtree-dir: third_party/abseil_cpp
git-subtree-mainline: ffb2ae54be
git-subtree-split: 768eb2ca28

commit fc8dc48020
1276 changed files with 208196 additions and 0 deletions
138  third_party/abseil_cpp/absl/debugging/internal/address_is_readable.cc  vendored  Normal file
@@ -0,0 +1,138 @@
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// base::AddressIsReadable() probes an address to see whether it is readable,
|
||||
// without faulting.
|
||||
|
||||
#include "absl/debugging/internal/address_is_readable.h"
|
||||
|
||||
#if !defined(__linux__) || defined(__ANDROID__)
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// On platforms other than Linux, just return true.
|
||||
bool AddressIsReadable(const void* /* addr */) { return true; }
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#else
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <cerrno>
|
||||
#include <cstdint>
|
||||
|
||||
#include "absl/base/internal/errno_saver.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Pack a pid and two file descriptors into a 64-bit word,
|
||||
// using 16, 24, and 24 bits for each respectively.
|
||||
static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
|
||||
ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
|
||||
"fd out of range");
|
||||
return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
|
||||
}
|
||||
|
||||
// Unpack x into a pid and two file descriptors, where x was created with
|
||||
// Pack().
|
||||
static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
|
||||
*pid = x >> 48;
|
||||
*read_fd = (x >> 24) & 0xffffff;
|
||||
*write_fd = x & 0xffffff;
|
||||
}
|
||||
|
||||
// Return whether the byte at *addr is readable, without faulting.
|
||||
// Saves and restores errno. Returns true on systems where
// unimplemented.
|
||||
// This is a namespace-scoped variable for correct zero-initialization.
|
||||
static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
|
||||
bool AddressIsReadable(const void *addr) {
|
||||
absl::base_internal::ErrnoSaver errno_saver;
|
||||
// We test whether a byte is readable by using write(). Normally, this would
|
||||
// be done via a cached file descriptor to /dev/null, but linux fails to
|
||||
// check whether the byte is readable when the destination is /dev/null, so
|
||||
// we use a cached pipe. We store the pid of the process that created the
|
||||
// pipe to handle the case where a process forks, and the child closes all
|
||||
// the file descriptors and then calls this routine. This is not perfect:
|
||||
// the child could use the routine, then close all file descriptors and then
|
||||
// use this routine again. But the likely use of this routine is when
|
||||
// crashing, to test the validity of pages when dumping the stack. Beware
|
||||
// that we may leak file descriptors, but we're unlikely to leak many.
|
||||
int bytes_written;
|
||||
int current_pid = getpid() & 0xffff; // we use only the low order 16 bits
|
||||
do { // until we do not get EBADF trying to use file descriptors
|
||||
int pid;
|
||||
int read_fd;
|
||||
int write_fd;
|
||||
uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
|
||||
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
|
||||
while (current_pid != pid) {
|
||||
int p[2];
|
||||
// new pipe
|
||||
if (pipe(p) != 0) {
|
||||
ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
|
||||
}
|
||||
fcntl(p[0], F_SETFD, FD_CLOEXEC);
|
||||
fcntl(p[1], F_SETFD, FD_CLOEXEC);
|
||||
uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
|
||||
if (pid_and_fds.compare_exchange_strong(
|
||||
local_pid_and_fds, new_pid_and_fds, std::memory_order_relaxed,
|
||||
std::memory_order_relaxed)) {
|
||||
local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
|
||||
} else { // fds not exposed to other threads; we can close them.
|
||||
close(p[0]);
|
||||
close(p[1]);
|
||||
local_pid_and_fds = pid_and_fds.load(std::memory_order_relaxed);
|
||||
}
|
||||
Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
|
||||
}
|
||||
errno = 0;
|
||||
// Use syscall(SYS_write, ...) instead of write() to prevent ASAN
|
||||
// and other checkers from complaining about accesses to arbitrary
|
||||
// memory.
|
||||
do {
|
||||
bytes_written = syscall(SYS_write, write_fd, addr, 1);
|
||||
} while (bytes_written == -1 && errno == EINTR);
|
||||
if (bytes_written == 1) { // remove the byte from the pipe
|
||||
char c;
|
||||
while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
|
||||
}
|
||||
}
|
||||
if (errno == EBADF) { // Descriptors invalid.
|
||||
// If pid_and_fds contains the problematic file descriptors we just used,
|
||||
// this call will forget them, and the loop will try again.
|
||||
pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
|
||||
std::memory_order_relaxed,
|
||||
std::memory_order_relaxed);
|
||||
}
|
||||
} while (errno == EBADF);
|
||||
return bytes_written == 1;
|
||||
}
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif
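The pid/fd cache above packs a 16-bit pid and two 24-bit file descriptors into a single 64-bit word so it can be swapped atomically with one compare-exchange. A standalone sketch of the same bit layout, mirroring the arithmetic of Pack()/Unpack() (illustrative only, not part of the vendored file):

#include <cassert>
#include <cstdint>

int main() {
  // Same layout as Pack()/Unpack(): pid in bits 48..63, read_fd in
  // bits 24..47, write_fd in bits 0..23.
  const uint64_t pid = 0x1234, read_fd = 5, write_fd = 6;
  const uint64_t packed =
      (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
  assert((packed >> 48) == pid);                   // pid round-trips
  assert(((packed >> 24) & 0xffffff) == read_fd);  // read_fd round-trips
  assert((packed & 0xffffff) == write_fd);         // write_fd round-trips
  return 0;
}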
32  third_party/abseil_cpp/absl/debugging/internal/address_is_readable.h  vendored  Normal file
@@ -0,0 +1,32 @@
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Return whether the byte at *addr is readable, without faulting.
|
||||
// Saves and restores errno.
|
||||
bool AddressIsReadable(const void *addr);
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
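A minimal caller sketch for the declaration above; the frame-pointer-walking loop and its frame layout are hypothetical, and the only vendored API used is AddressIsReadable():

#include "absl/debugging/internal/address_is_readable.h"

// Walks a (hypothetical) frame-pointer chain, stopping at the first
// address that cannot be read without faulting.
int CountReadableFrames(const void* fp) {
  int frames = 0;
  while (fp != nullptr && absl::debugging_internal::AddressIsReadable(fp)) {
    ++frames;
    // Hypothetical frame layout: the saved caller frame pointer is the
    // first word of the current frame.
    fp = *static_cast<const void* const*>(fp);
  }
  return frames;
}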
1895  third_party/abseil_cpp/absl/debugging/internal/demangle.cc  vendored  Normal file
File diff suppressed because it is too large
71  third_party/abseil_cpp/absl/debugging/internal/demangle.h  vendored  Normal file
@@ -0,0 +1,71 @@
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// An async-signal-safe and thread-safe demangler for Itanium C++ ABI
|
||||
// (aka G++ V3 ABI).
|
||||
//
|
||||
// The demangler is implemented to be used in async signal handlers to
|
||||
// symbolize stack traces. We cannot use libstdc++'s
|
||||
// abi::__cxa_demangle() in such signal handlers since it's not async
|
||||
// signal safe (it uses malloc() internally).
|
||||
//
|
||||
// Note that this demangler doesn't support full demangling. More
|
||||
// specifically, it doesn't print types of function parameters and
|
||||
// types of template arguments. It just skips them. However, it's
|
||||
// still very useful to extract basic information such as class,
|
||||
// function, constructor, destructor, and operator names.
|
||||
//
|
||||
// See the implementation note in demangle.cc if you are interested.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// | Mangled Name | The Demangler | abi::__cxa_demangle()
|
||||
// |---------------|---------------|-----------------------
|
||||
// | _Z1fv | f() | f()
|
||||
// | _Z1fi | f() | f(int)
|
||||
// | _Z3foo3bar | foo() | foo(bar)
|
||||
// | _Z1fIiEvi | f<>() | void f<int>(int)
|
||||
// | _ZN1N1fE | N::f | N::f
|
||||
// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar()
|
||||
// | _Zrm1XS_ | operator%() | operator%(X, X)
|
||||
// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo()
|
||||
// | _Z1fSs | f() | f(std::basic_string<char,
|
||||
// | | | std::char_traits<char>,
|
||||
// | | | std::allocator<char> >)
|
||||
//
|
||||
// See the unit test for more examples.
|
||||
//
|
||||
// Note: we might want to write demanglers for ABIs other than Itanium
|
||||
// C++ ABI in the future.
|
||||
//
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Demangle `mangled`. On success, return true and write the
|
||||
// demangled symbol name to `out`. Otherwise, return false.
|
||||
// `out` is modified even if demangling is unsuccessful.
|
||||
bool Demangle(const char *mangled, char *out, int out_size);
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
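A minimal caller sketch for the Demangle() declaration above; the mangled name comes from the table in the header comment, and the buffer size is arbitrary:

#include <cstdio>

#include "absl/debugging/internal/demangle.h"

int main() {
  char out[1024];
  // "_ZN3FooC1Ev" appears in the table above and demangles to "Foo::Foo()".
  if (absl::debugging_internal::Demangle("_ZN3FooC1Ev", out, sizeof(out))) {
    std::printf("%s\n", out);
  } else {
    std::printf("could not demangle\n");
  }
  return 0;
}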
195  third_party/abseil_cpp/absl/debugging/internal/demangle_test.cc  vendored  Normal file
@@ -0,0 +1,195 @@
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/debugging/internal/demangle.h"
|
||||
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
#include "absl/debugging/internal/stack_consumption.h"
|
||||
#include "absl/memory/memory.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
namespace {
|
||||
|
||||
// A wrapper function for Demangle() to make the unit test simple.
|
||||
static const char *DemangleIt(const char * const mangled) {
|
||||
static char demangled[4096];
|
||||
if (Demangle(mangled, demangled, sizeof(demangled))) {
|
||||
return demangled;
|
||||
} else {
|
||||
return mangled;
|
||||
}
|
||||
}
|
||||
|
||||
// Test corner cases of boundary conditions.
|
||||
TEST(Demangle, CornerCases) {
|
||||
char tmp[10];
|
||||
EXPECT_TRUE(Demangle("_Z6foobarv", tmp, sizeof(tmp)));
|
||||
// sizeof("foobar()") == 9
|
||||
EXPECT_STREQ("foobar()", tmp);
|
||||
EXPECT_TRUE(Demangle("_Z6foobarv", tmp, 9));
|
||||
EXPECT_STREQ("foobar()", tmp);
|
||||
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 8)); // Not enough.
|
||||
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 1));
|
||||
EXPECT_FALSE(Demangle("_Z6foobarv", tmp, 0));
|
||||
EXPECT_FALSE(Demangle("_Z6foobarv", nullptr, 0)); // Should not cause SEGV.
|
||||
EXPECT_FALSE(Demangle("_Z1000000", tmp, 9));
|
||||
}
|
||||
|
||||
// Test handling of functions suffixed with .clone.N, which is used
|
||||
// by GCC 4.5.x (and our locally-modified version of GCC 4.4.x), and
|
||||
// .constprop.N and .isra.N, which are used by GCC 4.6.x. These
|
||||
// suffixes are used to indicate functions which have been cloned
|
||||
// during optimization. We ignore these suffixes.
|
||||
TEST(Demangle, Clones) {
|
||||
char tmp[20];
|
||||
EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
|
||||
EXPECT_STREQ("Foo()", tmp);
|
||||
EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
|
||||
EXPECT_STREQ("Foo()", tmp);
|
||||
EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
|
||||
EXPECT_STREQ("Foo()", tmp);
|
||||
EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
|
||||
EXPECT_STREQ("Foo()", tmp);
|
||||
EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
|
||||
EXPECT_STREQ("Foo()", tmp);
|
||||
// Invalid (truncated), should not demangle.
|
||||
EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
|
||||
// Invalid (.clone. not followed by number), should not demangle.
|
||||
EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
|
||||
// Invalid (.clone. followed by non-number), should not demangle.
|
||||
EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
|
||||
// Invalid (.constprop. not followed by number), should not demangle.
|
||||
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
|
||||
}
|
||||
|
||||
// Tests that verify that Demangle footprint is within some limit.
|
||||
// They are not to be run under sanitizers as the sanitizers increase
|
||||
// stack consumption by about 4x.
|
||||
#if defined(ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION) && \
|
||||
!defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
|
||||
!defined(THREAD_SANITIZER)
|
||||
|
||||
static const char *g_mangled;
|
||||
static char g_demangle_buffer[4096];
|
||||
static char *g_demangle_result;
|
||||
|
||||
static void DemangleSignalHandler(int signo) {
|
||||
if (Demangle(g_mangled, g_demangle_buffer, sizeof(g_demangle_buffer))) {
|
||||
g_demangle_result = g_demangle_buffer;
|
||||
} else {
|
||||
g_demangle_result = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// Call Demangle and figure out the stack footprint of this call.
|
||||
static const char *DemangleStackConsumption(const char *mangled,
|
||||
int *stack_consumed) {
|
||||
g_mangled = mangled;
|
||||
*stack_consumed = GetSignalHandlerStackConsumption(DemangleSignalHandler);
|
||||
ABSL_RAW_LOG(INFO, "Stack consumption of Demangle: %d", *stack_consumed);
|
||||
return g_demangle_result;
|
||||
}
|
||||
|
||||
// Demangle stack consumption should be within 8kB for simple mangled names
|
||||
// with some level of nesting. With alternate signal stack we have 64K,
|
||||
// but some signal handlers run on thread stack, and could have arbitrarily
|
||||
// little space left (so we don't want to make this number too large).
|
||||
const int kStackConsumptionUpperLimit = 8192;
|
||||
|
||||
// Returns a mangled name nested to the given depth.
|
||||
static std::string NestedMangledName(int depth) {
|
||||
std::string mangled_name = "_Z1a";
|
||||
if (depth > 0) {
|
||||
mangled_name += "IXL";
|
||||
mangled_name += NestedMangledName(depth - 1);
|
||||
mangled_name += "EEE";
|
||||
}
|
||||
return mangled_name;
|
||||
}
|
||||
|
||||
TEST(Demangle, DemangleStackConsumption) {
|
||||
// Measure stack consumption of Demangle for nested mangled names of varying
|
||||
// depth. Since Demangle is implemented as a recursive descent parser,
|
||||
// stack consumption will grow as the nesting depth increases. By measuring
|
||||
// the stack consumption for increasing depths, we can see the growing
|
||||
// impact of any stack-saving changes made to the code for Demangle.
|
||||
int stack_consumed = 0;
|
||||
|
||||
const char *demangled =
|
||||
DemangleStackConsumption("_Z6foobarv", &stack_consumed);
|
||||
EXPECT_STREQ("foobar()", demangled);
|
||||
EXPECT_GT(stack_consumed, 0);
|
||||
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
|
||||
|
||||
const std::string nested_mangled_name0 = NestedMangledName(0);
|
||||
demangled = DemangleStackConsumption(nested_mangled_name0.c_str(),
|
||||
&stack_consumed);
|
||||
EXPECT_STREQ("a", demangled);
|
||||
EXPECT_GT(stack_consumed, 0);
|
||||
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
|
||||
|
||||
const std::string nested_mangled_name1 = NestedMangledName(1);
|
||||
demangled = DemangleStackConsumption(nested_mangled_name1.c_str(),
|
||||
&stack_consumed);
|
||||
EXPECT_STREQ("a<>", demangled);
|
||||
EXPECT_GT(stack_consumed, 0);
|
||||
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
|
||||
|
||||
const std::string nested_mangled_name2 = NestedMangledName(2);
|
||||
demangled = DemangleStackConsumption(nested_mangled_name2.c_str(),
|
||||
&stack_consumed);
|
||||
EXPECT_STREQ("a<>", demangled);
|
||||
EXPECT_GT(stack_consumed, 0);
|
||||
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
|
||||
|
||||
const std::string nested_mangled_name3 = NestedMangledName(3);
|
||||
demangled = DemangleStackConsumption(nested_mangled_name3.c_str(),
|
||||
&stack_consumed);
|
||||
EXPECT_STREQ("a<>", demangled);
|
||||
EXPECT_GT(stack_consumed, 0);
|
||||
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
|
||||
}
|
||||
|
||||
#endif // Stack consumption tests
|
||||
|
||||
static void TestOnInput(const char* input) {
|
||||
static const int kOutSize = 1048576;
|
||||
auto out = absl::make_unique<char[]>(kOutSize);
|
||||
Demangle(input, out.get(), kOutSize);
|
||||
}
|
||||
|
||||
TEST(DemangleRegression, NegativeLength) {
|
||||
TestOnInput("_ZZn4");
|
||||
}
|
||||
|
||||
TEST(DemangleRegression, DeeplyNestedArrayType) {
|
||||
const int depth = 100000;
|
||||
std::string data = "_ZStI";
|
||||
data.reserve(data.size() + 3 * depth + 1);
|
||||
for (int i = 0; i < depth; i++) {
|
||||
data += "A1_";
|
||||
}
|
||||
TestOnInput(data.c_str());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
382  third_party/abseil_cpp/absl/debugging/internal/elf_mem_image.cc  vendored  Normal file
@@ -0,0 +1,382 @@
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Allow dynamic symbol lookup in an in-memory Elf image.
|
||||
//
|
||||
|
||||
#include "absl/debugging/internal/elf_mem_image.h"
|
||||
|
||||
#ifdef ABSL_HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
|
||||
|
||||
#include <string.h>
|
||||
#include <cassert>
|
||||
#include <cstddef>
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
|
||||
// From binutils/include/elf/common.h (this doesn't appear to be documented
|
||||
// anywhere else).
|
||||
//
|
||||
// /* This flag appears in a Versym structure. It means that the symbol
|
||||
// is hidden, and is only visible with an explicit version number.
|
||||
// This is a GNU extension. */
|
||||
// #define VERSYM_HIDDEN 0x8000
|
||||
//
|
||||
// /* This is the mask for the rest of the Versym information. */
|
||||
// #define VERSYM_VERSION 0x7fff
|
||||
|
||||
#define VERSYM_VERSION 0x7fff
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
namespace {
|
||||
|
||||
#if __WORDSIZE == 32
|
||||
const int kElfClass = ELFCLASS32;
|
||||
int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); }
|
||||
int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); }
|
||||
#elif __WORDSIZE == 64
|
||||
const int kElfClass = ELFCLASS64;
|
||||
int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); }
|
||||
int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); }
|
||||
#else
|
||||
const int kElfClass = -1;
|
||||
int ElfBind(const ElfW(Sym) *) {
|
||||
ABSL_RAW_LOG(FATAL, "Unexpected word size");
|
||||
return 0;
|
||||
}
|
||||
int ElfType(const ElfW(Sym) *) {
|
||||
ABSL_RAW_LOG(FATAL, "Unexpected word size");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
// Extract an element from one of the ELF tables, cast it to desired type.
|
||||
// This is just a simple arithmetic and a glorified cast.
|
||||
// Callers are responsible for bounds checking.
|
||||
template <typename T>
|
||||
const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
|
||||
ElfW(Word) element_size, size_t index) {
|
||||
return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
|
||||
+ table_offset
|
||||
+ index * element_size);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// The value of this variable doesn't matter; it's used only for its
|
||||
// unique address.
|
||||
const int ElfMemImage::kInvalidBaseSentinel = 0;
|
||||
|
||||
ElfMemImage::ElfMemImage(const void *base) {
|
||||
ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
|
||||
Init(base);
|
||||
}
|
||||
|
||||
int ElfMemImage::GetNumSymbols() const {
|
||||
if (!hash_) {
|
||||
return 0;
|
||||
}
|
||||
// See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
|
||||
return hash_[1];
|
||||
}
|
||||
|
||||
const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
|
||||
ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
|
||||
return dynsym_ + index;
|
||||
}
|
||||
|
||||
const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
|
||||
ABSL_RAW_CHECK(index < GetNumSymbols(), "index out of range");
|
||||
return versym_ + index;
|
||||
}
|
||||
|
||||
const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
|
||||
ABSL_RAW_CHECK(index < ehdr_->e_phnum, "index out of range");
|
||||
return GetTableElement<ElfW(Phdr)>(ehdr_,
|
||||
ehdr_->e_phoff,
|
||||
ehdr_->e_phentsize,
|
||||
index);
|
||||
}
|
||||
|
||||
const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
|
||||
ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
|
||||
return dynstr_ + offset;
|
||||
}
|
||||
|
||||
const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
|
||||
if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
|
||||
// Symbol corresponds to "special" (e.g. SHN_ABS) section.
|
||||
return reinterpret_cast<const void *>(sym->st_value);
|
||||
}
|
||||
ABSL_RAW_CHECK(link_base_ < sym->st_value, "symbol out of range");
|
||||
return GetTableElement<char>(ehdr_, 0, 1, sym->st_value - link_base_);
|
||||
}
|
||||
|
||||
const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
|
||||
ABSL_RAW_CHECK(0 <= index && static_cast<size_t>(index) <= verdefnum_,
|
||||
"index out of range");
|
||||
const ElfW(Verdef) *version_definition = verdef_;
|
||||
while (version_definition->vd_ndx < index && version_definition->vd_next) {
|
||||
const char *const version_definition_as_char =
|
||||
reinterpret_cast<const char *>(version_definition);
|
||||
version_definition =
|
||||
reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
|
||||
version_definition->vd_next);
|
||||
}
|
||||
return version_definition->vd_ndx == index ? version_definition : nullptr;
|
||||
}
|
||||
|
||||
const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
|
||||
const ElfW(Verdef) *verdef) const {
|
||||
return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
|
||||
}
|
||||
|
||||
const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
|
||||
ABSL_RAW_CHECK(offset < strsize_, "offset out of range");
|
||||
return dynstr_ + offset;
|
||||
}
|
||||
|
||||
void ElfMemImage::Init(const void *base) {
|
||||
ehdr_ = nullptr;
|
||||
dynsym_ = nullptr;
|
||||
dynstr_ = nullptr;
|
||||
versym_ = nullptr;
|
||||
verdef_ = nullptr;
|
||||
hash_ = nullptr;
|
||||
strsize_ = 0;
|
||||
verdefnum_ = 0;
|
||||
link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
|
||||
if (!base) {
|
||||
return;
|
||||
}
|
||||
const char *const base_as_char = reinterpret_cast<const char *>(base);
|
||||
if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
|
||||
base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
int elf_class = base_as_char[EI_CLASS];
|
||||
if (elf_class != kElfClass) {
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
switch (base_as_char[EI_DATA]) {
|
||||
case ELFDATA2LSB: {
|
||||
if (__LITTLE_ENDIAN != __BYTE_ORDER) {
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case ELFDATA2MSB: {
|
||||
if (__BIG_ENDIAN != __BYTE_ORDER) {
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
|
||||
const ElfW(Phdr) *dynamic_program_header = nullptr;
|
||||
for (int i = 0; i < ehdr_->e_phnum; ++i) {
|
||||
const ElfW(Phdr) *const program_header = GetPhdr(i);
|
||||
switch (program_header->p_type) {
|
||||
case PT_LOAD:
|
||||
if (!~link_base_) {
|
||||
link_base_ = program_header->p_vaddr;
|
||||
}
|
||||
break;
|
||||
case PT_DYNAMIC:
|
||||
dynamic_program_header = program_header;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!~link_base_ || !dynamic_program_header) {
|
||||
assert(false);
|
||||
// Mark this image as not present. Can not recur infinitely.
|
||||
Init(nullptr);
|
||||
return;
|
||||
}
|
||||
ptrdiff_t relocation =
|
||||
base_as_char - reinterpret_cast<const char *>(link_base_);
|
||||
ElfW(Dyn) *dynamic_entry =
|
||||
reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
|
||||
relocation);
|
||||
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
|
||||
const ElfW(Xword) value = dynamic_entry->d_un.d_val + relocation;
|
||||
switch (dynamic_entry->d_tag) {
|
||||
case DT_HASH:
|
||||
hash_ = reinterpret_cast<ElfW(Word) *>(value);
|
||||
break;
|
||||
case DT_SYMTAB:
|
||||
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
|
||||
break;
|
||||
case DT_STRTAB:
|
||||
dynstr_ = reinterpret_cast<const char *>(value);
|
||||
break;
|
||||
case DT_VERSYM:
|
||||
versym_ = reinterpret_cast<ElfW(Versym) *>(value);
|
||||
break;
|
||||
case DT_VERDEF:
|
||||
verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
|
||||
break;
|
||||
case DT_VERDEFNUM:
|
||||
verdefnum_ = dynamic_entry->d_un.d_val;
|
||||
break;
|
||||
case DT_STRSZ:
|
||||
strsize_ = dynamic_entry->d_un.d_val;
|
||||
break;
|
||||
default:
|
||||
// Unrecognized entries explicitly ignored.
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
|
||||
!verdef_ || !verdefnum_ || !strsize_) {
|
||||
assert(false); // invalid VDSO
|
||||
// Mark this image as not present. Can not recur infinitely.
|
||||
Init(nullptr);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
bool ElfMemImage::LookupSymbol(const char *name,
|
||||
const char *version,
|
||||
int type,
|
||||
SymbolInfo *info_out) const {
|
||||
for (const SymbolInfo& info : *this) {
|
||||
if (strcmp(info.name, name) == 0 && strcmp(info.version, version) == 0 &&
|
||||
ElfType(info.symbol) == type) {
|
||||
if (info_out) {
|
||||
*info_out = info;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ElfMemImage::LookupSymbolByAddress(const void *address,
|
||||
SymbolInfo *info_out) const {
|
||||
for (const SymbolInfo& info : *this) {
|
||||
const char *const symbol_start =
|
||||
reinterpret_cast<const char *>(info.address);
|
||||
const char *const symbol_end = symbol_start + info.symbol->st_size;
|
||||
if (symbol_start <= address && address < symbol_end) {
|
||||
if (info_out) {
|
||||
// Client wants to know details for that symbol (the usual case).
|
||||
if (ElfBind(info.symbol) == STB_GLOBAL) {
|
||||
// Strong symbol; just return it.
|
||||
*info_out = info;
|
||||
return true;
|
||||
} else {
|
||||
// Weak or local. Record it, but keep looking for a strong one.
|
||||
*info_out = info;
|
||||
}
|
||||
} else {
|
||||
// Client only cares if there is an overlapping symbol.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
|
||||
: index_(index), image_(image) {
|
||||
}
|
||||
|
||||
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
|
||||
return &info_;
|
||||
}
|
||||
|
||||
const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
|
||||
return info_;
|
||||
}
|
||||
|
||||
bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
|
||||
return this->image_ == rhs.image_ && this->index_ == rhs.index_;
|
||||
}
|
||||
|
||||
bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
|
||||
this->Update(1);
|
||||
return *this;
|
||||
}
|
||||
|
||||
ElfMemImage::SymbolIterator ElfMemImage::begin() const {
|
||||
SymbolIterator it(this, 0);
|
||||
it.Update(0);
|
||||
return it;
|
||||
}
|
||||
|
||||
ElfMemImage::SymbolIterator ElfMemImage::end() const {
|
||||
return SymbolIterator(this, GetNumSymbols());
|
||||
}
|
||||
|
||||
void ElfMemImage::SymbolIterator::Update(int increment) {
|
||||
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
|
||||
ABSL_RAW_CHECK(image->IsPresent() || increment == 0, "");
|
||||
if (!image->IsPresent()) {
|
||||
return;
|
||||
}
|
||||
index_ += increment;
|
||||
if (index_ >= image->GetNumSymbols()) {
|
||||
index_ = image->GetNumSymbols();
|
||||
return;
|
||||
}
|
||||
const ElfW(Sym) *symbol = image->GetDynsym(index_);
|
||||
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
|
||||
ABSL_RAW_CHECK(symbol && version_symbol, "");
|
||||
const char *const symbol_name = image->GetDynstr(symbol->st_name);
|
||||
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
|
||||
const ElfW(Verdef) *version_definition = nullptr;
|
||||
const char *version_name = "";
|
||||
if (symbol->st_shndx == SHN_UNDEF) {
|
||||
// Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
|
||||
// version_index could well be greater than verdefnum_, so calling
|
||||
// GetVerdef(version_index) may trigger assertion.
|
||||
} else {
|
||||
version_definition = image->GetVerdef(version_index);
|
||||
}
|
||||
if (version_definition) {
|
||||
// I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
|
||||
// optional 2nd if the version has a parent.
|
||||
ABSL_RAW_CHECK(
|
||||
version_definition->vd_cnt == 1 || version_definition->vd_cnt == 2,
|
||||
"wrong number of entries");
|
||||
const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
|
||||
version_name = image->GetVerstr(version_aux->vda_name);
|
||||
}
|
||||
info_.name = symbol_name;
|
||||
info_.version = version_name;
|
||||
info_.address = image->GetSymAddr(symbol);
|
||||
info_.symbol = symbol;
|
||||
}
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_HAVE_ELF_MEM_IMAGE
134  third_party/abseil_cpp/absl/debugging/internal/elf_mem_image.h  vendored  Normal file
@@ -0,0 +1,134 @@
/*
|
||||
* Copyright 2017 The Abseil Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Allow dynamic symbol lookup for in-memory Elf images.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
|
||||
|
||||
// Including this will define the __GLIBC__ macro if glibc is being
|
||||
// used.
|
||||
#include <climits>
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
// Maybe one day we can rewrite this file not to require the elf
|
||||
// symbol extensions in glibc, but for right now we need them.
|
||||
#ifdef ABSL_HAVE_ELF_MEM_IMAGE
|
||||
#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
|
||||
#endif
|
||||
|
||||
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
|
||||
!defined(__asmjs__) && !defined(__wasm__)
|
||||
#define ABSL_HAVE_ELF_MEM_IMAGE 1
|
||||
#endif
|
||||
|
||||
#ifdef ABSL_HAVE_ELF_MEM_IMAGE
|
||||
|
||||
#include <link.h> // for ElfW
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// An in-memory ELF image (may not exist on disk).
|
||||
class ElfMemImage {
|
||||
private:
|
||||
// Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
|
||||
static const int kInvalidBaseSentinel;
|
||||
|
||||
public:
|
||||
// Sentinel: there could never be an elf image at this address.
|
||||
static constexpr const void *const kInvalidBase =
|
||||
static_cast<const void*>(&kInvalidBaseSentinel);
|
||||
|
||||
// Information about a single vdso symbol.
|
||||
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
|
||||
// Do not free() them or modify through them.
|
||||
struct SymbolInfo {
|
||||
const char *name; // E.g. "__vdso_getcpu"
|
||||
const char *version; // E.g. "LINUX_2.6", could be ""
|
||||
// for unversioned symbol.
|
||||
const void *address; // Relocated symbol address.
|
||||
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
|
||||
};
|
||||
|
||||
// Supports iteration over all dynamic symbols.
|
||||
class SymbolIterator {
|
||||
public:
|
||||
friend class ElfMemImage;
|
||||
const SymbolInfo *operator->() const;
|
||||
const SymbolInfo &operator*() const;
|
||||
SymbolIterator& operator++();
|
||||
bool operator!=(const SymbolIterator &rhs) const;
|
||||
bool operator==(const SymbolIterator &rhs) const;
|
||||
private:
|
||||
SymbolIterator(const void *const image, int index);
|
||||
void Update(int incr);
|
||||
SymbolInfo info_;
|
||||
int index_;
|
||||
const void *const image_;
|
||||
};
|
||||
|
||||
|
||||
explicit ElfMemImage(const void *base);
|
||||
void Init(const void *base);
|
||||
bool IsPresent() const { return ehdr_ != nullptr; }
|
||||
const ElfW(Phdr)* GetPhdr(int index) const;
|
||||
const ElfW(Sym)* GetDynsym(int index) const;
|
||||
const ElfW(Versym)* GetVersym(int index) const;
|
||||
const ElfW(Verdef)* GetVerdef(int index) const;
|
||||
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
|
||||
const char* GetDynstr(ElfW(Word) offset) const;
|
||||
const void* GetSymAddr(const ElfW(Sym) *sym) const;
|
||||
const char* GetVerstr(ElfW(Word) offset) const;
|
||||
int GetNumSymbols() const;
|
||||
|
||||
SymbolIterator begin() const;
|
||||
SymbolIterator end() const;
|
||||
|
||||
// Look up versioned dynamic symbol in the image.
|
||||
// Returns false if image is not present, or doesn't contain given
|
||||
// symbol/version/type combination.
|
||||
// If info_out is non-null, additional details are filled in.
|
||||
bool LookupSymbol(const char *name, const char *version,
|
||||
int symbol_type, SymbolInfo *info_out) const;
|
||||
|
||||
// Find info about symbol (if any) which overlaps given address.
|
||||
// Returns true if symbol was found; false if image isn't present
|
||||
// or doesn't have a symbol overlapping given address.
|
||||
// If info_out is non-null, additional details are filled in.
|
||||
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
|
||||
|
||||
private:
|
||||
const ElfW(Ehdr) *ehdr_;
|
||||
const ElfW(Sym) *dynsym_;
|
||||
const ElfW(Versym) *versym_;
|
||||
const ElfW(Verdef) *verdef_;
|
||||
const ElfW(Word) *hash_;
|
||||
const char *dynstr_;
|
||||
size_t strsize_;
|
||||
size_t verdefnum_;
|
||||
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
|
||||
};
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_HAVE_ELF_MEM_IMAGE
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_
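A minimal usage sketch for the class above, probing the kernel-provided VDSO (the usual in-memory ELF image). Obtaining the base address via getauxval()/AT_SYSINFO_EHDR is an assumption about the caller's environment (Linux with glibc), not something this header provides:

#include <sys/auxv.h>  // getauxval, AT_SYSINFO_EHDR (Linux/glibc)

#include <cstdio>

#include "absl/debugging/internal/elf_mem_image.h"

#ifdef ABSL_HAVE_ELF_MEM_IMAGE
int main() {
  const void* vdso_base =
      reinterpret_cast<const void*>(getauxval(AT_SYSINFO_EHDR));
  if (vdso_base == nullptr) return 0;

  absl::debugging_internal::ElfMemImage image(vdso_base);
  if (!image.IsPresent()) return 0;

  // Enumerate every dynamic symbol together with its version string.
  for (const auto& sym : image) {
    std::printf("%s (%s) at %p\n", sym.name, sym.version, sym.address);
  }
  return 0;
}
#else
int main() { return 0; }
#endif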
157  third_party/abseil_cpp/absl/debugging/internal/examine_stack.cc  vendored  Normal file
@@ -0,0 +1,157 @@
//
|
||||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#include "absl/debugging/internal/examine_stack.h"
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#include <csignal>
|
||||
#include <cstdio>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
#include "absl/base/macros.h"
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
#include "absl/debugging/symbolize.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Returns the program counter from signal context, nullptr if
|
||||
// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
|
||||
// ucontext_t on non-POSIX systems.
|
||||
void* GetProgramCounter(void* vuc) {
|
||||
#ifdef __linux__
|
||||
if (vuc != nullptr) {
|
||||
ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
|
||||
#if defined(__aarch64__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.pc);
|
||||
#elif defined(__arm__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.arm_pc);
|
||||
#elif defined(__i386__)
|
||||
if (14 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.gregs[14]);
|
||||
#elif defined(__mips__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.pc);
|
||||
#elif defined(__powerpc64__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.gp_regs[32]);
|
||||
#elif defined(__powerpc__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.regs->nip);
|
||||
#elif defined(__riscv)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.__gregs[REG_PC]);
|
||||
#elif defined(__s390__) && !defined(__s390x__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.psw.addr & 0x7fffffff);
|
||||
#elif defined(__s390__) && defined(__s390x__)
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.psw.addr);
|
||||
#elif defined(__x86_64__)
|
||||
if (16 < ABSL_ARRAYSIZE(context->uc_mcontext.gregs))
|
||||
return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
|
||||
#else
|
||||
#error "Undefined Architecture."
|
||||
#endif
|
||||
}
|
||||
#elif defined(__akaros__)
|
||||
auto* ctx = reinterpret_cast<struct user_context*>(vuc);
|
||||
return reinterpret_cast<void*>(get_user_ctx_pc(ctx));
|
||||
#endif
|
||||
static_cast<void>(vuc);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// The %p field width for printf() functions is two characters per byte,
|
||||
// and two extra for the leading "0x".
|
||||
static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
|
||||
|
||||
// Print a program counter, its stack frame size, and its symbol name.
|
||||
// Note that there is a separate symbolize_pc argument. Return addresses may be
|
||||
// at the end of the function, and this allows the caller to back up from pc if
|
||||
// appropriate.
|
||||
static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
|
||||
void* writerfn_arg, void* pc,
|
||||
void* symbolize_pc, int framesize,
|
||||
const char* const prefix) {
|
||||
char tmp[1024];
|
||||
const char* symbol = "(unknown)";
|
||||
if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
|
||||
symbol = tmp;
|
||||
}
|
||||
char buf[1024];
|
||||
if (framesize <= 0) {
|
||||
snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
|
||||
kPrintfPointerFieldWidth, pc, symbol);
|
||||
} else {
|
||||
snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
|
||||
kPrintfPointerFieldWidth, pc, framesize, symbol);
|
||||
}
|
||||
writerfn(buf, writerfn_arg);
|
||||
}
|
||||
|
||||
// Print a program counter and the corresponding stack frame size.
|
||||
static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
|
||||
void* writerfn_arg, void* pc, int framesize,
|
||||
const char* const prefix) {
|
||||
char buf[100];
|
||||
if (framesize <= 0) {
|
||||
snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
|
||||
kPrintfPointerFieldWidth, pc);
|
||||
} else {
|
||||
snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
|
||||
kPrintfPointerFieldWidth, pc, framesize);
|
||||
}
|
||||
writerfn(buf, writerfn_arg);
|
||||
}
|
||||
|
||||
void DumpPCAndFrameSizesAndStackTrace(
|
||||
void* pc, void* const stack[], int frame_sizes[], int depth,
|
||||
int min_dropped_frames, bool symbolize_stacktrace,
|
||||
void (*writerfn)(const char*, void*), void* writerfn_arg) {
|
||||
if (pc != nullptr) {
|
||||
// We don't know the stack frame size for PC, use 0.
|
||||
if (symbolize_stacktrace) {
|
||||
DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
|
||||
} else {
|
||||
DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < depth; i++) {
|
||||
if (symbolize_stacktrace) {
|
||||
// Pass the previous address of pc as the symbol address because pc is a
|
||||
// return address, and an overrun may occur when the function ends with a
|
||||
// call to a function annotated noreturn (e.g. CHECK). Note that we don't
|
||||
// do this for pc above, as the adjustment is only correct for return
|
||||
// addresses.
|
||||
DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
|
||||
reinterpret_cast<char*>(stack[i]) - 1,
|
||||
frame_sizes[i], " ");
|
||||
} else {
|
||||
DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
|
||||
" ");
|
||||
}
|
||||
}
|
||||
if (min_dropped_frames > 0) {
|
||||
char buf[100];
|
||||
snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
|
||||
min_dropped_frames);
|
||||
writerfn(buf, writerfn_arg);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
42  third_party/abseil_cpp/absl/debugging/internal/examine_stack.h  vendored  Normal file
@@ -0,0 +1,42 @@
//
|
||||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Returns the program counter from signal context, or nullptr if
|
||||
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
|
||||
// ucontext_t on non-POSIX systems.
|
||||
void* GetProgramCounter(void* vuc);
|
||||
|
||||
// Uses `writerfn` to dump the program counter, stack trace, and stack
|
||||
// frame sizes.
|
||||
void DumpPCAndFrameSizesAndStackTrace(
|
||||
void* pc, void* const stack[], int frame_sizes[], int depth,
|
||||
int min_dropped_frames, bool symbolize_stacktrace,
|
||||
void (*writerfn)(const char*, void*), void* writerfn_arg);
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
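A minimal caller sketch for DumpPCAndFrameSizesAndStackTrace() as declared above. The writer callback and the use of absl::GetStackFrames() (from absl/debugging/stacktrace.h, already included by the .cc above) are the caller's choice, and symbolization quality depends on the symbolizer having been initialized elsewhere:

#include <cstdio>

#include "absl/debugging/internal/examine_stack.h"
#include "absl/debugging/stacktrace.h"

namespace {
// Writer callback: `arg` is the opaque pointer forwarded by
// DumpPCAndFrameSizesAndStackTrace (a FILE* here, by our own convention).
void WriteToStream(const char* text, void* arg) {
  std::fputs(text, static_cast<std::FILE*>(arg));
}
}  // namespace

void DumpCurrentStack() {
  void* stack[32];
  int sizes[32];
  const int depth = absl::GetStackFrames(stack, sizes, 32, /*skip_count=*/1);
  absl::debugging_internal::DumpPCAndFrameSizesAndStackTrace(
      /*pc=*/nullptr, stack, sizes, depth, /*min_dropped_frames=*/0,
      /*symbolize_stacktrace=*/true, WriteToStream, stderr);
}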
184  third_party/abseil_cpp/absl/debugging/internal/stack_consumption.cc  vendored  Normal file
@@ -0,0 +1,184 @@
//
|
||||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/debugging/internal/stack_consumption.h"
|
||||
|
||||
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
|
||||
|
||||
#include <signal.h>
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
namespace {
|
||||
|
||||
// This code requires that we know the direction in which the stack
|
||||
// grows. It is commonly believed that this can be detected by putting
|
||||
// a variable on the stack and then passing its address to a function
|
||||
// that compares the address of this variable to the address of a
|
||||
// variable on the function's own stack. However, this is unspecified
|
||||
// behavior in C++: If two pointers p and q of the same type point to
|
||||
// different objects that are not members of the same object or
|
||||
// elements of the same array or to different functions, or if only
|
||||
// one of them is null, the results of p<q, p>q, p<=q, and p>=q are
|
||||
// unspecified. Therefore, instead we hardcode the direction of the
|
||||
// stack on platforms we know about.
|
||||
#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__)
|
||||
constexpr bool kStackGrowsDown = true;
|
||||
#else
|
||||
#error Need to define kStackGrowsDown
|
||||
#endif
|
||||
|
||||
// To measure the stack footprint of some code, we create a signal handler
|
||||
// (for SIGUSR2 say) that exercises this code on an alternate stack. This
|
||||
// alternate stack is initialized to some known pattern (0x55, 0x55, 0x55,
|
||||
// ...). We then self-send this signal, and after the signal handler returns,
|
||||
// look at the alternate stack buffer to see what portion has been touched.
|
||||
//
|
||||
// This trick gives us the stack footprint of the signal handler. But the
|
||||
// signal handler, even before the code for it is exercised, consumes some
|
||||
// stack already. We however only want the stack usage of the code inside the
|
||||
// signal handler. To measure this accurately, we install two signal handlers:
|
||||
// one that does nothing and just returns, and the user-provided signal
|
||||
// handler. The difference between the stack consumption of these two signal
// handlers should give us the stack footprint of interest.
|
||||
|
||||
void EmptySignalHandler(int) {}
|
||||
|
||||
// This is an arbitrary value, and could be increased further, at the cost of
// memset()ting it all to a known sentinel value.
|
||||
constexpr int kAlternateStackSize = 64 << 10; // 64KiB
|
||||
|
||||
constexpr int kSafetyMargin = 32;
|
||||
constexpr char kAlternateStackFillValue = 0x55;
|
||||
|
||||
// These helper functions look at the alternate stack buffer, and figure
|
||||
// out what portion of this buffer has been touched - this is the stack
|
||||
// consumption of the signal handler running on this alternate stack.
|
||||
// This function will return -1 if the alternate stack buffer has not been
|
||||
// touched. It will abort the program if the buffer has overflowed or is about
|
||||
// to overflow.
|
||||
int GetStackConsumption(const void* const altstack) {
|
||||
const char* begin;
|
||||
int increment;
|
||||
if (kStackGrowsDown) {
|
||||
begin = reinterpret_cast<const char*>(altstack);
|
||||
increment = 1;
|
||||
} else {
|
||||
begin = reinterpret_cast<const char*>(altstack) + kAlternateStackSize - 1;
|
||||
increment = -1;
|
||||
}
|
||||
|
||||
for (int usage_count = kAlternateStackSize; usage_count > 0; --usage_count) {
|
||||
if (*begin != kAlternateStackFillValue) {
|
||||
ABSL_RAW_CHECK(usage_count <= kAlternateStackSize - kSafetyMargin,
|
||||
"Buffer has overflowed or is about to overflow");
|
||||
return usage_count;
|
||||
}
|
||||
begin += increment;
|
||||
}
|
||||
|
||||
ABSL_RAW_LOG(FATAL, "Unreachable code");
|
||||
return -1;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
int GetSignalHandlerStackConsumption(void (*signal_handler)(int)) {
|
||||
// The alt-signal-stack cannot be heap allocated because there is a
|
||||
// bug in glibc-2.2 where some signal handler setup code looks at the
|
||||
// current stack pointer to figure out what thread is currently running.
|
||||
// Therefore, the alternate stack must be allocated from the main stack
|
||||
// itself.
|
||||
void* altstack = mmap(nullptr, kAlternateStackSize, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
ABSL_RAW_CHECK(altstack != MAP_FAILED, "mmap() failed");
|
||||
|
||||
// Set up the alt-signal-stack (and save the older one).
|
||||
stack_t sigstk;
|
||||
memset(&sigstk, 0, sizeof(sigstk));
|
||||
sigstk.ss_sp = altstack;
|
||||
sigstk.ss_size = kAlternateStackSize;
|
||||
sigstk.ss_flags = 0;
|
||||
stack_t old_sigstk;
|
||||
memset(&old_sigstk, 0, sizeof(old_sigstk));
|
||||
ABSL_RAW_CHECK(sigaltstack(&sigstk, &old_sigstk) == 0,
|
||||
"sigaltstack() failed");
|
||||
|
||||
// Set up SIGUSR1 and SIGUSR2 signal handlers (and save the older ones).
|
||||
struct sigaction sa;
|
||||
memset(&sa, 0, sizeof(sa));
|
||||
struct sigaction old_sa1, old_sa2;
|
||||
sigemptyset(&sa.sa_mask);
|
||||
sa.sa_flags = SA_ONSTACK;
|
||||
|
||||
// SIGUSR1 maps to EmptySignalHandler.
|
||||
sa.sa_handler = EmptySignalHandler;
|
||||
ABSL_RAW_CHECK(sigaction(SIGUSR1, &sa, &old_sa1) == 0, "sigaction() failed");
|
||||
|
||||
// SIGUSR2 maps to signal_handler.
|
||||
sa.sa_handler = signal_handler;
|
||||
ABSL_RAW_CHECK(sigaction(SIGUSR2, &sa, &old_sa2) == 0, "sigaction() failed");
|
||||
|
||||
// Send SIGUSR1 signal and measure the stack consumption of the empty
|
||||
// signal handler.
|
||||
// The first signal might use more stack space. Run once and ignore the
|
||||
// results to get that out of the way.
|
||||
ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
|
||||
|
||||
memset(altstack, kAlternateStackFillValue, kAlternateStackSize);
|
||||
ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
|
||||
int base_stack_consumption = GetStackConsumption(altstack);
|
||||
|
||||
// Send SIGUSR2 signal and measure the stack consumption of signal_handler.
|
||||
ABSL_RAW_CHECK(kill(getpid(), SIGUSR2) == 0, "kill() failed");
|
||||
int signal_handler_stack_consumption = GetStackConsumption(altstack);
|
||||
|
||||
// Now restore the old alt-signal-stack and signal handlers.
|
||||
if (old_sigstk.ss_sp == nullptr && old_sigstk.ss_size == 0 &&
|
||||
(old_sigstk.ss_flags & SS_DISABLE)) {
|
||||
// https://git.musl-libc.org/cgit/musl/commit/src/signal/sigaltstack.c?id=7829f42a2c8944555439380498ab8b924d0f2070
|
||||
// The original stack has ss_size==0 and ss_flags==SS_DISABLE, but some
|
||||
// versions of musl have a bug that rejects ss_size==0. Work around this by
|
||||
// setting ss_size to MINSIGSTKSZ, which should be ignored by the kernel
|
||||
// when SS_DISABLE is set.
|
||||
old_sigstk.ss_size = MINSIGSTKSZ;
|
||||
}
|
||||
ABSL_RAW_CHECK(sigaltstack(&old_sigstk, nullptr) == 0,
|
||||
"sigaltstack() failed");
|
||||
ABSL_RAW_CHECK(sigaction(SIGUSR1, &old_sa1, nullptr) == 0,
|
||||
"sigaction() failed");
|
||||
ABSL_RAW_CHECK(sigaction(SIGUSR2, &old_sa2, nullptr) == 0,
|
||||
"sigaction() failed");
|
||||
|
||||
ABSL_RAW_CHECK(munmap(altstack, kAlternateStackSize) == 0, "munmap() failed");
|
||||
if (signal_handler_stack_consumption != -1 && base_stack_consumption != -1) {
|
||||
return signal_handler_stack_consumption - base_stack_consumption;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
49  third_party/abseil_cpp/absl/debugging/internal/stack_consumption.h  vendored  Normal file
@@ -0,0 +1,49 @@
//
|
||||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Helper function for measuring stack consumption of signal handlers.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
// The code in this module is not portable.
|
||||
// Use this feature test macro to detect its availability.
|
||||
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
|
||||
#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
|
||||
#elif !defined(__APPLE__) && !defined(_WIN32) && \
|
||||
(defined(__i386__) || defined(__x86_64__) || defined(__ppc__))
|
||||
#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Returns the stack consumption in bytes for the code exercised by
|
||||
// signal_handler. To measure stack consumption, signal_handler is registered
|
||||
// as a signal handler, so the code that it exercises must be async-signal
|
||||
// safe. The argument of signal_handler is an implementation detail of signal
|
||||
// handlers and should be ignored by the code for signal_handler. Use global
|
||||
// variables to pass information between your test code and signal_handler.
|
||||
int GetSignalHandlerStackConsumption(void (*signal_handler)(int));
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
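The header above explains the calling convention but ships no example beyond the test that follows; the sketch below is not part of this commit (HandlerUnderTest and ReportHandlerStackCost are hypothetical names) and shows how a caller might pass information out of the measured handler through a global, as the comment recommends, while keeping the handler body async-signal-safe.

#include <string.h>

#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/internal/stack_consumption.h"

#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
namespace {

// Result channel out of the handler; the handler's int argument must be
// ignored, so a global is the suggested way to communicate.
volatile int g_scratch_seen = 0;

void HandlerUnderTest(int /* signo */) {
  char scratch[256];
  memset(scratch, 'x', sizeof(scratch));  // async-signal-safe
  g_scratch_seen = scratch[0];            // keeps scratch from being optimized out
}

}  // namespace

void ReportHandlerStackCost() {
  int used = absl::debugging_internal::GetSignalHandlerStackConsumption(
      HandlerUnderTest);
  ABSL_RAW_LOG(INFO, "handler consumed %d bytes of stack", used);
}
#endif  // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION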
|
||||
50
third_party/abseil_cpp/absl/debugging/internal/stack_consumption_test.cc
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
//
|
||||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/debugging/internal/stack_consumption.h"
|
||||
|
||||
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
namespace {
|
||||
|
||||
static void SimpleSignalHandler(int signo) {
|
||||
char buf[100];
|
||||
memset(buf, 'a', sizeof(buf));
|
||||
|
||||
// Never true, but prevents compiler from optimizing buf out.
|
||||
if (signo == 0) {
|
||||
ABSL_RAW_LOG(INFO, "%p", static_cast<void*>(buf));
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SignalHandlerStackConsumptionTest, MeasuresStackConsumption) {
|
||||
// Our handler should consume a reasonable number of bytes.
|
||||
EXPECT_GE(GetSignalHandlerStackConsumption(SimpleSignalHandler), 100);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
|
||||
196
third_party/abseil_cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,196 @@
|
|||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
|
||||
|
||||
// Generate stack tracer for aarch64
|
||||
|
||||
#if defined(__linux__)
|
||||
#include <sys/mman.h>
|
||||
#include <ucontext.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <iostream>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/debugging/internal/address_is_readable.h"
|
||||
#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
|
||||
static const uintptr_t kUnknownFrameSize = 0;
|
||||
|
||||
#if defined(__linux__)
|
||||
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
|
||||
static const unsigned char* GetKernelRtSigreturnAddress() {
|
||||
constexpr uintptr_t kImpossibleAddress = 1;
|
||||
ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
|
||||
uintptr_t address = memoized.load(std::memory_order_relaxed);
|
||||
if (address != kImpossibleAddress) {
|
||||
return reinterpret_cast<const unsigned char*>(address);
|
||||
}
|
||||
|
||||
address = reinterpret_cast<uintptr_t>(nullptr);
|
||||
|
||||
#ifdef ABSL_HAVE_VDSO_SUPPORT
|
||||
absl::debugging_internal::VDSOSupport vdso;
|
||||
if (vdso.IsPresent()) {
|
||||
absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
|
||||
if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", STT_FUNC,
|
||||
&symbol_info) ||
|
||||
symbol_info.address == nullptr) {
|
||||
// Unexpected: VDSO is present, yet the expected symbol is missing
|
||||
// or null.
|
||||
assert(false && "VDSO is present, but doesn't have expected symbol");
|
||||
} else {
|
||||
if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
|
||||
kImpossibleAddress) {
|
||||
address = reinterpret_cast<uintptr_t>(symbol_info.address);
|
||||
} else {
|
||||
assert(false && "VDSO returned invalid address");
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
memoized.store(address, std::memory_order_relaxed);
|
||||
return reinterpret_cast<const unsigned char*>(address);
|
||||
}
|
||||
#endif // __linux__
|
||||
|
||||
// Compute the size of a stack frame in [low..high). We assume that
|
||||
// low < high. Returns kUnknownFrameSize if that does not hold.
|
||||
template<typename T>
|
||||
static inline uintptr_t ComputeStackFrameSize(const T* low,
|
||||
const T* high) {
|
||||
const char* low_char_ptr = reinterpret_cast<const char *>(low);
|
||||
const char* high_char_ptr = reinterpret_cast<const char *>(high);
|
||||
return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize;
|
||||
}
|
||||
|
||||
// Given a pointer to a stack frame, locate and return the calling
|
||||
// stackframe, or return null if no stackframe can be found. Perform sanity
|
||||
// checks (the strictness of which is controlled by the boolean parameter
|
||||
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
|
||||
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
|
||||
void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
|
||||
bool check_frame_size = true;
|
||||
|
||||
#if defined(__linux__)
|
||||
if (WITH_CONTEXT && uc != nullptr) {
|
||||
// Check to see if next frame's return address is __kernel_rt_sigreturn.
|
||||
if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
|
||||
const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
|
||||
// old_frame_pointer[0] is not suitable for unwinding; look at
|
||||
// ucontext to discover frame pointer before signal.
|
||||
void **const pre_signal_frame_pointer =
|
||||
reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
|
||||
|
||||
// Check that alleged frame pointer is actually readable. This is to
|
||||
// prevent "double fault" in case we hit the first fault due to e.g.
|
||||
// stack corruption.
|
||||
if (!absl::debugging_internal::AddressIsReadable(
|
||||
pre_signal_frame_pointer))
|
||||
return nullptr;
|
||||
|
||||
// Alleged frame pointer is readable, use it for further unwinding.
|
||||
new_frame_pointer = pre_signal_frame_pointer;
|
||||
|
||||
// Skip the frame size check if we return from a signal. We may be using
|
||||
// an alternate stack for signals.
|
||||
check_frame_size = false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// aarch64 ABI requires stack pointer to be 16-byte-aligned.
|
||||
if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 15) != 0)
|
||||
return nullptr;
|
||||
|
||||
// Check frame size. In strict mode, we assume frames to be under
|
||||
// 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
|
||||
if (check_frame_size) {
|
||||
const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
|
||||
const uintptr_t frame_size =
|
||||
ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
|
||||
if (frame_size == kUnknownFrameSize || frame_size > max_size)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return new_frame_pointer;
|
||||
}
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
|
||||
const void *ucp, int *min_dropped_frames) {
|
||||
#ifdef __GNUC__
|
||||
void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
|
||||
#else
|
||||
# error reading the stack pointer is not yet supported on this platform.
|
||||
#endif
|
||||
|
||||
skip_count++; // Skip the frame for this function.
|
||||
int n = 0;
|
||||
|
||||
// The frame pointer points to low address of a frame. The first 64-bit
|
||||
// word of a frame points to the next frame up the call chain, which normally
|
||||
// is just after the high address of the current frame. The second word of
|
||||
// a frame contains the return address of the caller. To find a pc value
|
||||
// associated with the current frame, we need to go down a level in the call
|
||||
// chain. So we remember the return address of the last frame seen. This
|
||||
// does not work for the first stack frame, which belongs to UnwindImpl(), but
|
||||
// we skip the frame for UnwindImpl() anyway.
|
||||
void* prev_return_address = nullptr;
|
||||
|
||||
while (frame_pointer && n < max_depth) {
|
||||
// The absl::GetStackFrames routine is called when we are in some
|
||||
// informational context (the failure signal handler for example).
|
||||
// Use the non-strict unwinding rules to produce a stack trace
|
||||
// that is as complete as possible (even if it contains a few bogus
|
||||
// entries in some rare cases).
|
||||
void **next_frame_pointer =
|
||||
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
|
||||
|
||||
if (skip_count > 0) {
|
||||
skip_count--;
|
||||
} else {
|
||||
result[n] = prev_return_address;
|
||||
if (IS_STACK_FRAMES) {
|
||||
sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
|
||||
}
|
||||
n++;
|
||||
}
|
||||
prev_return_address = frame_pointer[1];
|
||||
frame_pointer = next_frame_pointer;
|
||||
}
|
||||
if (min_dropped_frames != nullptr) {
|
||||
// Implementation detail: we clamp the number of frames we are willing to
|
||||
// count, so as not to spend too much time in the loop below.
|
||||
const int kMaxUnwind = 200;
|
||||
int j = 0;
|
||||
for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
|
||||
frame_pointer =
|
||||
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
|
||||
}
|
||||
*min_dropped_frames = j;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return true;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
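The frame layout described in the UnwindImpl comment above (the first word of a frame links to the caller's frame, the second word holds the return address) can be seen in isolation with a stripped-down walker such as the sketch below. This is an illustration only, assuming GCC or Clang on AArch64 with frame pointers enabled; it performs none of the readability, alignment, or frame-size checks the real code applies, so it may read garbage near the top of the chain. DumpRawFrames is a hypothetical name.

#include <cstdio>

// Walk the raw frame-pointer chain: fp[0] is the caller's frame pointer and
// fp[1] is the return address recorded for that caller. No validation here.
__attribute__((noinline)) void DumpRawFrames(int max_depth) {
  void **fp = static_cast<void **>(__builtin_frame_address(0));
  for (int i = 0; fp != nullptr && i < max_depth; ++i) {
    std::printf("frame %d: fp=%p return=%p\n", i,
                static_cast<void *>(fp), fp[1]);
    fp = static_cast<void **>(fp[0]);  // follow the link to the caller's frame
  }
}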
|
||||
134
third_party/abseil_cpp/absl/debugging/internal/stacktrace_arm-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,134 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// This is inspired by Craig Silverstein's PowerPC stacktrace code.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
|
||||
// WARNING:
|
||||
// This only works if all your code is in either ARM or THUMB mode. With
|
||||
// interworking, the frame pointer of the caller can either be in r11 (ARM
|
||||
// mode) or r7 (THUMB mode). A callee only saves the frame pointer of its
|
||||
// mode in a fixed location on its stack frame. If the caller is a different
|
||||
// mode, there is no easy way to find the frame pointer. It can either be
|
||||
// still in the designated register or saved on stack along with other callee
|
||||
// saved registers.
|
||||
|
||||
// Given a pointer to a stack frame, locate and return the calling
|
||||
// stackframe, or return nullptr if no stackframe can be found. Perform sanity
|
||||
// checks (the strictness of which is controlled by the boolean parameter
|
||||
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
|
||||
template<bool STRICT_UNWINDING>
|
||||
static void **NextStackFrame(void **old_sp) {
|
||||
void **new_sp = (void**) old_sp[-1];
|
||||
|
||||
// Check that the transition from frame pointer old_sp to frame
|
||||
// pointer new_sp isn't clearly bogus
|
||||
if (STRICT_UNWINDING) {
|
||||
// With the stack growing downwards, older stack frame must be
|
||||
// at a greater address than the current one.
|
||||
if (new_sp <= old_sp) return nullptr;
|
||||
// Assume stack frames larger than 100,000 bytes are bogus.
|
||||
if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
|
||||
} else {
|
||||
// In the non-strict mode, allow discontiguous stack frames.
|
||||
// (alternate-signal-stacks for example).
|
||||
if (new_sp == old_sp) return nullptr;
|
||||
// And allow frames up to about 1MB.
|
||||
if ((new_sp > old_sp)
|
||||
&& ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
|
||||
}
|
||||
if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
|
||||
return new_sp;
|
||||
}
|
||||
|
||||
// This ensures that absl::GetStackTrace sets up the Link Register properly.
|
||||
#ifdef __GNUC__
|
||||
void StacktraceArmDummyFunction() __attribute__((noinline));
|
||||
void StacktraceArmDummyFunction() { __asm__ volatile(""); }
|
||||
#else
|
||||
# error StacktraceArmDummyFunction() needs to be ported to this platform.
|
||||
#endif
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
|
||||
const void * /* ucp */, int *min_dropped_frames) {
|
||||
#ifdef __GNUC__
|
||||
void **sp = reinterpret_cast<void**>(__builtin_frame_address(0));
|
||||
#else
|
||||
# error reading the stack pointer is not yet supported on this platform.
|
||||
#endif
|
||||
|
||||
// On ARM, the return address is stored in the link register (r14).
|
||||
// This is not saved on the stack frame of a leaf function. To
|
||||
// simplify code that reads return addresses, we call a dummy
|
||||
// function so that the return address of this function is also
|
||||
// stored in the stack frame. This works at least for gcc.
|
||||
StacktraceArmDummyFunction();
|
||||
|
||||
int n = 0;
|
||||
while (sp && n < max_depth) {
|
||||
// The absl::GetStackFrames routine is called when we are in some
|
||||
// informational context (the failure signal handler for example).
|
||||
// Use the non-strict unwinding rules to produce a stack trace
|
||||
// that is as complete as possible (even if it contains a few bogus
|
||||
// entries in some rare cases).
|
||||
void **next_sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
|
||||
|
||||
if (skip_count > 0) {
|
||||
skip_count--;
|
||||
} else {
|
||||
result[n] = *sp;
|
||||
|
||||
if (IS_STACK_FRAMES) {
|
||||
if (next_sp > sp) {
|
||||
sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
|
||||
} else {
|
||||
// A frame-size of 0 is used to indicate unknown frame size.
|
||||
sizes[n] = 0;
|
||||
}
|
||||
}
|
||||
n++;
|
||||
}
|
||||
sp = next_sp;
|
||||
}
|
||||
if (min_dropped_frames != nullptr) {
|
||||
// Implementation detail: we clamp the number of frames we are willing to
|
||||
// count, so as not to spend too much time in the loop below.
|
||||
const int kMaxUnwind = 200;
|
||||
int j = 0;
|
||||
for (; sp != nullptr && j < kMaxUnwind; j++) {
|
||||
sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
|
||||
}
|
||||
*min_dropped_frames = j;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return false;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
|
||||
70
third_party/abseil_cpp/absl/debugging/internal/stacktrace_config.h
vendored
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
/*
|
||||
* Copyright 2017 The Abseil Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
|
||||
* Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing
|
||||
* the actual unwinder implementation.
|
||||
* This header is "private" to stacktrace.cc.
|
||||
* DO NOT include it in any other file.
|
||||
*/
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
|
||||
|
||||
#if defined(ABSL_STACKTRACE_INL_HEADER)
|
||||
#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
|
||||
|
||||
#elif defined(_WIN32)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_win32-inl.inc"
|
||||
|
||||
#elif defined(__linux__) && !defined(__ANDROID__)
|
||||
|
||||
#if !defined(NO_FRAME_POINTER)
|
||||
# if defined(__i386__) || defined(__x86_64__)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_x86-inl.inc"
|
||||
# elif defined(__ppc__) || defined(__PPC__)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_powerpc-inl.inc"
|
||||
# elif defined(__aarch64__)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_aarch64-inl.inc"
|
||||
# elif defined(__arm__)
|
||||
// Note: When using glibc this may require -funwind-tables to function properly.
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_generic-inl.inc"
|
||||
# else
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
|
||||
# endif
|
||||
#else // defined(NO_FRAME_POINTER)
|
||||
# if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_generic-inl.inc"
|
||||
# elif defined(__ppc__) || defined(__PPC__)
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_generic-inl.inc"
|
||||
# else
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
|
||||
# endif
|
||||
#endif // NO_FRAME_POINTER
|
||||
|
||||
#else
|
||||
#define ABSL_STACKTRACE_INL_HEADER \
|
||||
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
|
||||
|
||||
#endif
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_
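For orientation, the macro selected above is meant to be consumed by stacktrace.cc (the header calls itself private to that file). The consuming side presumably looks roughly like the sketch below; this is an approximation for illustration, not a quote of stacktrace.cc.

// Approximate shape of the consumer (illustrative only):
#include "absl/debugging/internal/stacktrace_config.h"

#if defined(ABSL_STACKTRACE_INL_HEADER)
#include ABSL_STACKTRACE_INL_HEADER  // pulls in the matching UnwindImpl()
#else
#error Expected stacktrace_config.h to define ABSL_STACKTRACE_INL_HEADER
#endif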
|
||||
108
third_party/abseil_cpp/absl/debugging/internal/stacktrace_generic-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Portable implementation - just use glibc
|
||||
//
|
||||
// Note: The glibc implementation may cause a call to malloc.
|
||||
// This can cause a deadlock in HeapProfiler.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
|
||||
|
||||
#include <execinfo.h>
|
||||
#include <atomic>
|
||||
#include <cstring>
|
||||
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
#include "absl/base/attributes.h"
|
||||
|
||||
// Sometimes, we can try to get a stack trace from within a stack
|
||||
// trace, because we don't block signals inside this code (which would be too
|
||||
// expensive: the two extra system calls per stack trace do matter here).
|
||||
// That can cause a self-deadlock.
|
||||
// Protect against such reentrant calls by failing to get a stack trace.
|
||||
//
|
||||
// We use __thread here because the code here is extremely low level -- it is
|
||||
// called while collecting stack traces from within malloc and mmap, and thus
|
||||
// can not call anything which might call malloc or mmap itself.
|
||||
static __thread int recursive = 0;
|
||||
|
||||
// The stack trace function might be invoked very early in the program's
|
||||
// execution (e.g. from the very first malloc if using tcmalloc). Also, the
|
||||
// glibc implementation itself will trigger malloc the first time it is called.
|
||||
// As such, we suppress usage of backtrace during this early stage of execution.
|
||||
static std::atomic<bool> disable_stacktraces(true); // Disabled until healthy.
|
||||
// Waiting until static initializers run seems to be late enough.
|
||||
// This file is included into stacktrace.cc so this will only run once.
|
||||
ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
|
||||
void* unused_stack[1];
|
||||
// Force the first backtrace to happen early to get the one-time shared lib
|
||||
// loading (allocation) out of the way. After the first call it is much safer
|
||||
// to use backtrace from a signal handler if we crash somewhere later.
|
||||
backtrace(unused_stack, 1);
|
||||
disable_stacktraces.store(false, std::memory_order_relaxed);
|
||||
return 0;
|
||||
}();
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
|
||||
const void *ucp, int *min_dropped_frames) {
|
||||
if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
|
||||
return 0;
|
||||
}
|
||||
++recursive;
|
||||
|
||||
static_cast<void>(ucp); // Unused.
|
||||
static const int kStackLength = 64;
|
||||
void * stack[kStackLength];
|
||||
int size;
|
||||
|
||||
size = backtrace(stack, kStackLength);
|
||||
skip_count++; // we want to skip the current frame as well
|
||||
int result_count = size - skip_count;
|
||||
if (result_count < 0)
|
||||
result_count = 0;
|
||||
if (result_count > max_depth)
|
||||
result_count = max_depth;
|
||||
for (int i = 0; i < result_count; i++)
|
||||
result[i] = stack[i + skip_count];
|
||||
|
||||
if (IS_STACK_FRAMES) {
|
||||
// No implementation for finding out the stack frame sizes yet.
|
||||
memset(sizes, 0, sizeof(*sizes) * result_count);
|
||||
}
|
||||
if (min_dropped_frames != nullptr) {
|
||||
if (size - skip_count - max_depth > 0) {
|
||||
*min_dropped_frames = size - skip_count - max_depth;
|
||||
} else {
|
||||
*min_dropped_frames = 0;
|
||||
}
|
||||
}
|
||||
|
||||
--recursive;
|
||||
|
||||
return result_count;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return true;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
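The generic implementation above is built on glibc's backtrace(). Outside of Abseil, the same primitive, together with backtrace_symbols() (which allocates and is therefore not async-signal-safe), can be exercised directly, roughly as in this sketch; PrintCurrentStack is a hypothetical helper, not part of the library.

#include <execinfo.h>

#include <cstdio>
#include <cstdlib>

// Print the current call stack using the glibc primitives the generic
// implementation builds on. backtrace_symbols() calls malloc, so never use
// this from a signal handler.
void PrintCurrentStack() {
  void *frames[64];
  int depth = backtrace(frames, 64);
  char **names = backtrace_symbols(frames, depth);
  if (names != nullptr) {
    for (int i = 0; i < depth; ++i) std::printf("  %s\n", names[i]);
    std::free(names);
  }
}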
|
||||
248
third_party/abseil_cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Produce stack trace. I'm guessing (hoping!) the code is much like
|
||||
// for x86. For apple machines, at least, it seems to be; see
|
||||
// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
|
||||
// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
|
||||
// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
|
||||
|
||||
#if defined(__linux__)
|
||||
#include <asm/ptrace.h> // for PT_NIP.
|
||||
#include <ucontext.h> // for ucontext_t
|
||||
#endif
|
||||
|
||||
#include <unistd.h>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/base/optimization.h"
|
||||
#include "absl/base/port.h"
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
#include "absl/debugging/internal/address_is_readable.h"
|
||||
#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
|
||||
|
||||
// Given a stack pointer, return the saved link register value.
|
||||
// Note that this is the link register for a callee.
|
||||
static inline void *StacktracePowerPCGetLR(void **sp) {
|
||||
// PowerPC has 3 main ABIs, which say where in the stack the
|
||||
// Link Register is. For DARWIN and AIX (used by apple and
|
||||
// linux ppc64), it's in sp[2]. For SYSV (used by linux ppc),
|
||||
// it's in sp[1].
|
||||
#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
|
||||
return *(sp+2);
|
||||
#elif defined(_CALL_SYSV)
|
||||
return *(sp+1);
|
||||
#elif defined(__APPLE__) || defined(__FreeBSD__) || \
|
||||
(defined(__linux__) && defined(__PPC64__))
|
||||
// This check is in case the compiler doesn't define _CALL_AIX/etc.
|
||||
return *(sp+2);
|
||||
#elif defined(__linux)
|
||||
// This check is in case the compiler doesn't define _CALL_SYSV.
|
||||
return *(sp+1);
|
||||
#else
|
||||
#error Need to specify the PPC ABI for your architecture.
|
||||
#endif
|
||||
}
|
||||
|
||||
// Given a pointer to a stack frame, locate and return the calling
|
||||
// stackframe, or return null if no stackframe can be found. Perform sanity
|
||||
// checks (the strictness of which is controlled by the boolean parameter
|
||||
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
|
||||
template<bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
static void **NextStackFrame(void **old_sp, const void *uc) {
|
||||
void **new_sp = (void **) *old_sp;
|
||||
enum { kStackAlignment = 16 };
|
||||
|
||||
// Check that the transition from frame pointer old_sp to frame
|
||||
// pointer new_sp isn't clearly bogus
|
||||
if (STRICT_UNWINDING) {
|
||||
// With the stack growing downwards, older stack frame must be
|
||||
// at a greater address than the current one.
|
||||
if (new_sp <= old_sp) return nullptr;
|
||||
// Assume stack frames larger than 100,000 bytes are bogus.
|
||||
if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
|
||||
} else {
|
||||
// In the non-strict mode, allow discontiguous stack frames.
|
||||
// (alternate-signal-stacks for example).
|
||||
if (new_sp == old_sp) return nullptr;
|
||||
// And allow frames up to about 1MB.
|
||||
if ((new_sp > old_sp)
|
||||
&& ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
|
||||
}
|
||||
if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
|
||||
|
||||
#if defined(__linux__)
|
||||
enum StackTraceKernelSymbolStatus {
|
||||
kNotInitialized = 0, kAddressValid, kAddressInvalid };
|
||||
|
||||
if (IS_WITH_CONTEXT && uc != nullptr) {
|
||||
static StackTraceKernelSymbolStatus kernel_symbol_status =
|
||||
kNotInitialized; // Sentinel: not computed yet.
|
||||
// Initialize with sentinel value: __kernel_sigtramp_rt64 cannot
|
||||
// possibly be there.
|
||||
static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
|
||||
if (kernel_symbol_status == kNotInitialized) {
|
||||
absl::debugging_internal::VDSOSupport vdso;
|
||||
if (vdso.IsPresent()) {
|
||||
absl::debugging_internal::VDSOSupport::SymbolInfo
|
||||
sigtramp_rt64_symbol_info;
|
||||
if (!vdso.LookupSymbol(
|
||||
"__kernel_sigtramp_rt64", "LINUX_2.6.15",
|
||||
absl::debugging_internal::VDSOSupport::kVDSOSymbolType,
|
||||
&sigtramp_rt64_symbol_info) ||
|
||||
sigtramp_rt64_symbol_info.address == nullptr) {
|
||||
// Unexpected: VDSO is present, yet the expected symbol is missing
|
||||
// or null.
|
||||
assert(false && "VDSO is present, but doesn't have expected symbol");
|
||||
kernel_symbol_status = kAddressInvalid;
|
||||
} else {
|
||||
kernel_sigtramp_rt64_address =
|
||||
reinterpret_cast<const unsigned char *>(
|
||||
sigtramp_rt64_symbol_info.address);
|
||||
kernel_symbol_status = kAddressValid;
|
||||
}
|
||||
} else {
|
||||
kernel_symbol_status = kAddressInvalid;
|
||||
}
|
||||
}
|
||||
|
||||
if (new_sp != nullptr &&
|
||||
kernel_symbol_status == kAddressValid &&
|
||||
StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) {
|
||||
const ucontext_t* signal_context =
|
||||
reinterpret_cast<const ucontext_t*>(uc);
|
||||
void **const sp_before_signal =
|
||||
reinterpret_cast<void**>(signal_context->uc_mcontext.gp_regs[PT_R1]);
|
||||
// Check that alleged sp before signal is nonnull and is reasonably
|
||||
// aligned.
|
||||
if (sp_before_signal != nullptr &&
|
||||
((uintptr_t)sp_before_signal % kStackAlignment) == 0) {
|
||||
// Check that alleged stack pointer is actually readable. This is to
|
||||
// prevent a "double fault" in case we hit the first fault due to e.g.
|
||||
// a stack corruption.
|
||||
if (absl::debugging_internal::AddressIsReadable(sp_before_signal)) {
|
||||
// Alleged stack pointer is readable, use it for further unwinding.
|
||||
new_sp = sp_before_signal;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return new_sp;
|
||||
}
|
||||
|
||||
// This ensures that absl::GetStackTrace sets up the Link Register properly.
|
||||
ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() {
|
||||
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
|
||||
}
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
|
||||
const void *ucp, int *min_dropped_frames) {
|
||||
void **sp;
|
||||
// Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
|
||||
// and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
|
||||
// different asm syntax. I don't know quite the best way to discriminate
|
||||
// systems using the old as from the new one; I've gone with __APPLE__.
|
||||
#ifdef __APPLE__
|
||||
__asm__ volatile ("mr %0,r1" : "=r" (sp));
|
||||
#else
|
||||
__asm__ volatile ("mr %0,1" : "=r" (sp));
|
||||
#endif
|
||||
|
||||
// On PowerPC, the "Link Register" or "Link Record" (LR), is a stack
|
||||
// entry that holds the return address of the subroutine call (what
|
||||
// instruction we run after our function finishes). This is the
|
||||
// same as the stack-pointer of our parent routine, which is what we
|
||||
// want here. While the compiler will always(?) set up LR for
|
||||
// subroutine calls, it may not for leaf functions (such as this one).
|
||||
// This routine forces the compiler (at least gcc) to push it anyway.
|
||||
AbslStacktracePowerPCDummyFunction();
|
||||
|
||||
// The LR save area is used by the callee, so the top entry is bogus.
|
||||
skip_count++;
|
||||
|
||||
int n = 0;
|
||||
|
||||
// Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in
|
||||
// the link register) of a function call is stored in the caller's stack
|
||||
// frame instead of the callee's. When we look for the return address
|
||||
// associated with a stack frame, we need to make sure that there is a
|
||||
// caller frame before it. So we call NextStackFrame before entering the
|
||||
// loop below and check next_sp instead of sp for loop termination.
|
||||
// The outermost frame is set up by runtimes and it does not have a
|
||||
// caller frame, so it is skipped.
|
||||
|
||||
// The absl::GetStackFrames routine is called when we are in some
|
||||
// informational context (the failure signal handler for example).
|
||||
// Use the non-strict unwinding rules to produce a stack trace
|
||||
// that is as complete as possible (even if it contains a few
|
||||
// bogus entries in some rare cases).
|
||||
void **next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
|
||||
|
||||
while (next_sp && n < max_depth) {
|
||||
if (skip_count > 0) {
|
||||
skip_count--;
|
||||
} else {
|
||||
result[n] = StacktracePowerPCGetLR(sp);
|
||||
if (IS_STACK_FRAMES) {
|
||||
if (next_sp > sp) {
|
||||
sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
|
||||
} else {
|
||||
// A frame-size of 0 is used to indicate unknown frame size.
|
||||
sizes[n] = 0;
|
||||
}
|
||||
}
|
||||
n++;
|
||||
}
|
||||
|
||||
sp = next_sp;
|
||||
next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(sp, ucp);
|
||||
}
|
||||
|
||||
if (min_dropped_frames != nullptr) {
|
||||
// Implementation detail: we clamp the number of frames we are willing to
|
||||
// count, so as not to spend too much time in the loop below.
|
||||
const int kMaxUnwind = 1000;
|
||||
int j = 0;
|
||||
for (; next_sp != nullptr && j < kMaxUnwind; j++) {
|
||||
next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
|
||||
}
|
||||
*min_dropped_frames = j;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return true;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_
|
||||
24
third_party/abseil_cpp/absl/debugging/internal/stacktrace_unimplemented-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
static int UnwindImpl(void** /* result */, int* /* sizes */,
|
||||
int /* max_depth */, int /* skip_count */,
|
||||
const void* /* ucp */, int *min_dropped_frames) {
|
||||
if (min_dropped_frames != nullptr) {
|
||||
*min_dropped_frames = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return false;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_
|
||||
93
third_party/abseil_cpp/absl/debugging/internal/stacktrace_win32-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Produces a stack trace for Windows. Normally, one could use
|
||||
// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that
|
||||
// should work for binaries compiled using MSVC in "debug" mode.
|
||||
// However, in "release" mode, Windows uses frame-pointer
|
||||
// optimization, which makes getting a stack trace very difficult.
|
||||
//
|
||||
// There are several approaches one can take. One is to use Windows
|
||||
// intrinsics like StackWalk64. These can work, but have restrictions
|
||||
// on how successful they can be. Another attempt is to write a
|
||||
// version of stacktrace_x86-inl.h that has heuristic support for
|
||||
// dealing with FPO, similar to what WinDbg does (see
|
||||
// http://www.nynaeve.net/?p=97). There are (non-working) examples of
|
||||
// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1
|
||||
//
|
||||
// The solution we've ended up doing is to call the undocumented
|
||||
// windows function RtlCaptureStackBackTrace, which probably doesn't
|
||||
// work with FPO but at least is fast, and doesn't require a symbol
|
||||
// server.
|
||||
//
|
||||
// This code is inspired by a patch from David Vitek:
|
||||
// https://code.google.com/p/google-perftools/issues/detail?id=83
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
|
||||
|
||||
#include <windows.h> // for GetProcAddress and GetModuleHandle
|
||||
#include <cassert>
|
||||
|
||||
typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
|
||||
IN ULONG frames_to_skip,
|
||||
IN ULONG frames_to_capture,
|
||||
OUT PVOID *backtrace,
|
||||
OUT PULONG backtrace_hash);
|
||||
|
||||
// It is not possible to load RtlCaptureStackBackTrace at static init time in
|
||||
// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace
|
||||
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
|
||||
!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
|
||||
static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
|
||||
&::CaptureStackBackTrace;
|
||||
#else
|
||||
// Load the function we need at static init time, where we don't have
|
||||
// to worry about someone else holding the loader's lock.
|
||||
static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn =
|
||||
(RtlCaptureStackBackTrace_Function*)GetProcAddress(
|
||||
GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace");
|
||||
#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
|
||||
const void*, int* min_dropped_frames) {
|
||||
int n = 0;
|
||||
if (!RtlCaptureStackBackTrace_fn) {
|
||||
// can't find a stacktrace with no function to call
|
||||
} else {
|
||||
n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0);
|
||||
}
|
||||
if (IS_STACK_FRAMES) {
|
||||
// No implementation for finding out the stack frame sizes yet.
|
||||
memset(sizes, 0, sizeof(*sizes) * n);
|
||||
}
|
||||
if (min_dropped_frames != nullptr) {
|
||||
// Not implemented.
|
||||
*min_dropped_frames = 0;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return false;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_
|
||||
346
third_party/abseil_cpp/absl/debugging/internal/stacktrace_x86-inl.inc
vendored
Normal file
|
|
@ -0,0 +1,346 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Produce stack trace
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
|
||||
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
|
||||
|
||||
#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
|
||||
#include <ucontext.h> // for ucontext_t
|
||||
#endif
|
||||
|
||||
#if !defined(_WIN32)
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
|
||||
#include "absl/base/macros.h"
|
||||
#include "absl/base/port.h"
|
||||
#include "absl/debugging/internal/address_is_readable.h"
|
||||
#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
|
||||
#include "absl/debugging/stacktrace.h"
|
||||
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
|
||||
using absl::debugging_internal::AddressIsReadable;
|
||||
|
||||
#if defined(__linux__) && defined(__i386__)
|
||||
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
|
||||
// preceding "syscall" or "sysenter".
|
||||
// If __kernel_vsyscall uses frame pointer, answer 0.
|
||||
//
|
||||
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
|
||||
// to analyze before giving up. Up to kMaxBytes+1 bytes of
|
||||
// instructions could be accessed.
|
||||
//
|
||||
// Here are known __kernel_vsyscall instruction sequences:
|
||||
//
|
||||
// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
|
||||
// Used on Intel.
|
||||
// 0xffffe400 <__kernel_vsyscall+0>: push %ecx
|
||||
// 0xffffe401 <__kernel_vsyscall+1>: push %edx
|
||||
// 0xffffe402 <__kernel_vsyscall+2>: push %ebp
|
||||
// 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
|
||||
// 0xffffe405 <__kernel_vsyscall+5>: sysenter
|
||||
//
|
||||
// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
|
||||
// Used on AMD.
|
||||
// 0xffffe400 <__kernel_vsyscall+0>: push %ebp
|
||||
// 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
|
||||
// 0xffffe403 <__kernel_vsyscall+3>: syscall
|
||||
//
|
||||
|
||||
// The sequence below isn't actually expected in Google fleet,
|
||||
// here only for completeness. Remove this comment from OSS release.
|
||||
|
||||
// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
|
||||
// 0xffffe400 <__kernel_vsyscall+0>: int $0x80
|
||||
// 0xffffe401 <__kernel_vsyscall+1>: ret
|
||||
//
|
||||
static const int kMaxBytes = 10;
|
||||
|
||||
// We use assert()s instead of DCHECK()s -- this is too low level
|
||||
// for DCHECK().
|
||||
|
||||
static int CountPushInstructions(const unsigned char *const addr) {
|
||||
int result = 0;
|
||||
for (int i = 0; i < kMaxBytes; ++i) {
|
||||
if (addr[i] == 0x89) {
|
||||
// "mov reg,reg"
|
||||
if (addr[i + 1] == 0xE5) {
|
||||
// Found "mov %esp,%ebp".
|
||||
return 0;
|
||||
}
|
||||
++i; // Skip register encoding byte.
|
||||
} else if (addr[i] == 0x0F &&
|
||||
(addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
|
||||
// Found "sysenter" or "syscall".
|
||||
return result;
|
||||
} else if ((addr[i] & 0xF0) == 0x50) {
|
||||
// Found "push %reg".
|
||||
++result;
|
||||
} else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
|
||||
// Found "int $0x80"
|
||||
assert(result == 0);
|
||||
return 0;
|
||||
} else {
|
||||
// Unexpected instruction.
|
||||
assert(false && "unexpected instruction in __kernel_vsyscall");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
// Unexpected: didn't find SYSENTER or SYSCALL in
|
||||
// [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
|
||||
assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
// Assume stack frames larger than 100,000 bytes are bogus.
|
||||
static const int kMaxFrameBytes = 100000;
|
||||
|
||||
// Returns the stack frame pointer from signal context, 0 if unknown.
|
||||
// vuc is a ucontext_t *. We use void* to avoid the use
|
||||
// of ucontext_t on non-POSIX systems.
|
||||
static uintptr_t GetFP(const void *vuc) {
|
||||
#if !defined(__linux__)
|
||||
static_cast<void>(vuc); // Avoid an unused argument compiler warning.
|
||||
#else
|
||||
if (vuc != nullptr) {
|
||||
auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
|
||||
#if defined(__i386__)
|
||||
const auto bp = uc->uc_mcontext.gregs[REG_EBP];
|
||||
const auto sp = uc->uc_mcontext.gregs[REG_ESP];
|
||||
#elif defined(__x86_64__)
|
||||
const auto bp = uc->uc_mcontext.gregs[REG_RBP];
|
||||
const auto sp = uc->uc_mcontext.gregs[REG_RSP];
|
||||
#else
|
||||
const uintptr_t bp = 0;
|
||||
const uintptr_t sp = 0;
|
||||
#endif
|
||||
// Sanity-check that the base pointer is valid. It should be as long as
|
||||
// SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code in
|
||||
// the process is compiled with --copt=-fomit-frame-pointer or
|
||||
// --copt=-momit-leaf-frame-pointer.
|
||||
//
|
||||
// TODO(bcmills): -momit-leaf-frame-pointer is currently the default
|
||||
// behavior when building with clang. Talk to the C++ toolchain team about
|
||||
// fixing that.
|
||||
if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;
|
||||
|
||||
// If bp isn't a plausible frame pointer, return the stack pointer instead.
|
||||
// If we're lucky, it points to the start of a stack frame; otherwise, we'll
|
||||
// get one frame of garbage in the stack trace and fail the sanity check on
|
||||
// the next iteration.
|
||||
return sp;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Given a pointer to a stack frame, locate and return the calling
|
||||
// stackframe, or return null if no stackframe can be found. Perform sanity
|
||||
// checks (the strictness of which is controlled by the boolean parameter
|
||||
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
|
||||
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
static void **NextStackFrame(void **old_fp, const void *uc) {
|
||||
void **new_fp = (void **)*old_fp;
|
||||
|
||||
#if defined(__linux__) && defined(__i386__)
|
||||
if (WITH_CONTEXT && uc != nullptr) {
|
||||
// How many "push %reg" instructions are there at __kernel_vsyscall?
|
||||
// This is constant for a given kernel and processor, so compute
|
||||
// it only once.
|
||||
static int num_push_instructions = -1; // Sentinel: not computed yet.
|
||||
// Initialize with sentinel value: __kernel_rt_sigreturn can not possibly
|
||||
// be there.
|
||||
static const unsigned char *kernel_rt_sigreturn_address = nullptr;
|
||||
static const unsigned char *kernel_vsyscall_address = nullptr;
|
||||
if (num_push_instructions == -1) {
|
||||
#ifdef ABSL_HAVE_VDSO_SUPPORT
|
||||
absl::debugging_internal::VDSOSupport vdso;
|
||||
if (vdso.IsPresent()) {
|
||||
absl::debugging_internal::VDSOSupport::SymbolInfo
|
||||
rt_sigreturn_symbol_info;
|
||||
absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
|
||||
if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
|
||||
&rt_sigreturn_symbol_info) ||
|
||||
!vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
|
||||
&vsyscall_symbol_info) ||
|
||||
rt_sigreturn_symbol_info.address == nullptr ||
|
||||
vsyscall_symbol_info.address == nullptr) {
|
||||
// Unexpected: 32-bit VDSO is present, yet one of the expected
|
||||
// symbols is missing or null.
|
||||
assert(false && "VDSO is present, but doesn't have expected symbols");
|
||||
num_push_instructions = 0;
|
||||
} else {
|
||||
kernel_rt_sigreturn_address =
|
||||
reinterpret_cast<const unsigned char *>(
|
||||
rt_sigreturn_symbol_info.address);
|
||||
kernel_vsyscall_address =
|
||||
reinterpret_cast<const unsigned char *>(
|
||||
vsyscall_symbol_info.address);
|
||||
num_push_instructions =
|
||||
CountPushInstructions(kernel_vsyscall_address);
|
||||
}
|
||||
} else {
|
||||
num_push_instructions = 0;
|
||||
}
|
||||
#else // ABSL_HAVE_VDSO_SUPPORT
|
||||
num_push_instructions = 0;
|
||||
#endif // ABSL_HAVE_VDSO_SUPPORT
|
||||
}
|
||||
if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
|
||||
old_fp[1] == kernel_rt_sigreturn_address) {
|
||||
const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
|
||||
// This kernel does not use frame pointer in its VDSO code,
|
||||
// and so %ebp is not suitable for unwinding.
|
||||
void **const reg_ebp =
|
||||
reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
|
||||
const unsigned char *const reg_eip =
|
||||
reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
|
||||
if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
|
||||
reg_eip - kernel_vsyscall_address < kMaxBytes) {
|
||||
// We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
|
||||
// Restore from 'ucv' instead.
|
||||
void **const reg_esp =
|
||||
reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
|
||||
// Check that alleged %esp is not null and is reasonably aligned.
|
||||
if (reg_esp &&
|
||||
((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
|
||||
// Check that alleged %esp is actually readable. This is to prevent
|
||||
// "double fault" in case we hit the first fault due to e.g. stack
|
||||
// corruption.
|
||||
void *const reg_esp2 = reg_esp[num_push_instructions - 1];
|
||||
if (AddressIsReadable(reg_esp2)) {
|
||||
// Alleged %esp is readable, use it for further unwinding.
|
||||
new_fp = reinterpret_cast<void **>(reg_esp2);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
|
||||
const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);
|
||||
|
||||
// Check that the transition from frame pointer old_fp to frame
|
||||
// pointer new_fp isn't clearly bogus. Skip the checks if new_fp
|
||||
// matches the signal context, so that we don't skip out early when
|
||||
// using an alternate signal stack.
|
||||
//
|
||||
// TODO(bcmills): The GetFP call should be completely unnecessary when
|
||||
// SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
|
||||
// stack by this point), but it is empirically still needed (e.g. when the
|
||||
// stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some
|
||||
// frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what
|
||||
// it's supposed to.
|
||||
if (STRICT_UNWINDING &&
|
||||
(!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
|
||||
// With the stack growing downwards, older stack frame must be
|
||||
// at a greater address than the current one.
|
||||
if (new_fp_u <= old_fp_u) return nullptr;
|
||||
if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
|
||||
} else {
|
||||
if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below
|
||||
// In the non-strict mode, allow discontiguous stack frames.
|
||||
// (alternate-signal-stacks for example).
|
||||
if (new_fp == old_fp) return nullptr;
|
||||
}
|
||||
|
||||
if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
|
||||
#ifdef __i386__
|
||||
// On 32-bit machines, the stack pointer can be very close to
|
||||
// 0xffffffff, so we explicitly check for a pointer into the
|
||||
// last two pages in the address space
|
||||
if (new_fp_u >= 0xffffe000) return nullptr;
|
||||
#endif
|
||||
#if !defined(_WIN32)
|
||||
if (!STRICT_UNWINDING) {
|
||||
// Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
|
||||
// on AMD-based machines with VDSO-enabled kernels.
|
||||
// Make an extra sanity check to ensure new_fp is readable.
|
||||
// Note: NextStackFrame<false>() is only called while the program
|
||||
// is already on its last leg, so it's ok to be slow here.
|
||||
|
||||
if (!AddressIsReadable(new_fp)) {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return new_fp;
|
||||
}
|
||||
|
||||
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
|
||||
ABSL_ATTRIBUTE_NOINLINE
|
||||
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
|
||||
const void *ucp, int *min_dropped_frames) {
|
||||
int n = 0;
|
||||
void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
|
||||
|
||||
while (fp && n < max_depth) {
|
||||
if (*(fp + 1) == reinterpret_cast<void *>(0)) {
|
||||
// In 64-bit code, we often see a frame that
|
||||
// points to itself and has a return address of 0.
|
||||
break;
|
||||
}
|
||||
void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
|
||||
if (skip_count > 0) {
|
||||
skip_count--;
|
||||
} else {
|
||||
result[n] = *(fp + 1);
|
||||
if (IS_STACK_FRAMES) {
|
||||
if (next_fp > fp) {
|
||||
sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
|
||||
} else {
|
||||
// A frame-size of 0 is used to indicate unknown frame size.
|
||||
sizes[n] = 0;
|
||||
}
|
||||
}
|
||||
n++;
|
||||
}
|
||||
fp = next_fp;
|
||||
}
|
||||
if (min_dropped_frames != nullptr) {
|
||||
// Implementation detail: we clamp the maximum number of frames we are willing to
|
||||
// count, so as not to spend too much time in the loop below.
|
||||
const int kMaxUnwind = 1000;
|
||||
int j = 0;
|
||||
for (; fp != nullptr && j < kMaxUnwind; j++) {
|
||||
fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
|
||||
}
|
||||
*min_dropped_frames = j;
|
||||
}
|
||||
return n;
|
||||
}
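// Example usage (sketch): callers normally reach UnwindImpl() through the
// wrappers declared in absl/debugging/stacktrace.h, such as
// absl::GetStackTrace(), which forward result/max_depth/skip_count as above:
//
//   void *stack[64];
//   int depth = absl::GetStackTrace(stack, 64, /*skip_count=*/1);
//   for (int i = 0; i < depth; ++i) {
//     // stack[i] is a return address; pass it to absl::Symbolize() for a
//     // human-readable name.
//   }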
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
bool StackTraceWorksForTest() {
|
||||
return true;
|
||||
}
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
|
||||
128
third_party/abseil_cpp/absl/debugging/internal/symbolize.h
vendored
Normal file
|
|
@@ -0,0 +1,128 @@
|
|||
// Copyright 2018 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file contains internal parts of the Abseil symbolizer.
|
||||
// Do not depend on anything in this file; it may change at any time.
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
|
||||
#include "absl/base/config.h"
|
||||
|
||||
#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
|
||||
#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
|
||||
#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && \
|
||||
!defined(__asmjs__) && !defined(__wasm__)
|
||||
#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
|
||||
|
||||
#include <elf.h>
|
||||
#include <link.h> // For ElfW() macro.
|
||||
#include <functional>
|
||||
#include <string>
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// Iterates over all sections, invoking callback on each with the section name
|
||||
// and the section header.
|
||||
//
|
||||
// Returns true on success; otherwise returns false in case of errors.
|
||||
//
|
||||
// This is not async-signal-safe.
|
||||
bool ForEachSection(int fd,
|
||||
const std::function<bool(const std::string& name,
|
||||
const ElfW(Shdr) &)>& callback);
|
||||
|
||||
// Gets the section header for the given name, if it exists. Returns true on
|
||||
// success. Otherwise, returns false.
|
||||
bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
|
||||
ElfW(Shdr) *out);
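// Example usage (sketch; fd is assumed to be an open, readable ELF image,
// e.g. the running executable, and error handling is elided):
//
//   ElfW(Shdr) shdr;
//   if (GetSectionHeaderByName(fd, ".text", sizeof(".text") - 1, &shdr)) {
//     // shdr.sh_addr and shdr.sh_size describe the .text section.
//   }
//   ForEachSection(fd, [](const std::string& name, const ElfW(Shdr)& hdr) {
//     // Inspect each section; return true to continue iterating.
//     return true;
//   });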
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
struct SymbolDecoratorArgs {
|
||||
// The program counter we are getting symbolic name for.
|
||||
const void *pc;
|
||||
// 0 for main executable, load address for shared libraries.
|
||||
ptrdiff_t relocation;
|
||||
// Read-only file descriptor for ELF image covering "pc",
|
||||
// or -1 if no such ELF image exists in /proc/self/maps.
|
||||
int fd;
|
||||
// Output buffer, size.
|
||||
// Note: the buffer may not be empty -- default symbolizer may have already
|
||||
// produced some output, and earlier decorators may have adorned it in
|
||||
// some way. You are free to replace or augment the contents (within the
|
||||
// symbol_buf_size limit).
|
||||
char *const symbol_buf;
|
||||
size_t symbol_buf_size;
|
||||
// Temporary scratch space, size.
|
||||
// Use that space in preference to allocating your own stack buffer to
|
||||
// conserve stack.
|
||||
char *const tmp_buf;
|
||||
size_t tmp_buf_size;
|
||||
// User-provided argument
|
||||
void* arg;
|
||||
};
|
||||
using SymbolDecorator = void (*)(const SymbolDecoratorArgs *);
|
||||
|
||||
// Installs a function-pointer as a decorator. Returns a value less than zero
|
||||
// if the system cannot install the decorator. Otherwise, returns a unique
|
||||
// identifier corresponding to the decorator. This identifier can be used to
|
||||
// uninstall the decorator - See RemoveSymbolDecorator() below.
|
||||
int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);
|
||||
|
||||
// Removes a previously installed function-pointer decorator. Parameter "ticket"
|
||||
// is the return-value from calling InstallSymbolDecorator().
|
||||
bool RemoveSymbolDecorator(int ticket);
|
||||
|
||||
// Remove all installed decorators. Returns true if successful, false if
|
||||
// symbolization is currently in progress.
|
||||
bool RemoveAllSymbolDecorators(void);
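// Example usage (sketch; MyDecorator and the appended tag are hypothetical):
//
//   void MyDecorator(const SymbolDecoratorArgs *args) {
//     // Append a marker to whatever earlier symbolization produced,
//     // staying within symbol_buf_size (uses <cstring>).
//     const size_t len = strlen(args->symbol_buf);
//     static constexpr char kTag[] = " [decorated]";
//     if (len + sizeof(kTag) <= args->symbol_buf_size) {
//       memcpy(args->symbol_buf + len, kTag, sizeof(kTag));
//     }
//   }
//
//   int ticket = InstallSymbolDecorator(&MyDecorator, nullptr);
//   ...
//   if (ticket >= 0) RemoveSymbolDecorator(ticket);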
|
||||
|
||||
// Registers an address range to a file mapping.
|
||||
//
|
||||
// Preconditions:
|
||||
// start <= end
|
||||
// filename != nullptr
|
||||
//
|
||||
// Returns true if the file was successfully registered.
|
||||
bool RegisterFileMappingHint(
|
||||
const void* start, const void* end, uint64_t offset, const char* filename);
|
||||
|
||||
// Looks up the file mapping registered by RegisterFileMappingHint for an
|
||||
// address range. If there is one, the file name is stored in *filename and
|
||||
// *start and *end are modified to reflect the registered mapping. Returns
|
||||
// whether any hint was found.
|
||||
bool GetFileMappingHint(const void** start,
|
||||
const void** end,
|
||||
uint64_t * offset,
|
||||
const char** filename);
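// Example usage (sketch; region_start/region_end and "/tmp/original.so" are
// hypothetical -- typically they describe a custom mmap of a copied ELF file):
//
//   RegisterFileMappingHint(region_start, region_end, /*offset=*/0,
//                           "/tmp/original.so");
//   ...
//   const void *start = pc, *end = pc;
//   uint64_t offset;
//   const char *filename;
//   if (GetFileMappingHint(&start, &end, &offset, &filename)) {
//     // filename names the ELF file to symbolize pc against; start/end now
//     // reflect the registered mapping.
//   }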
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
|
||||
194
third_party/abseil_cpp/absl/debugging/internal/vdso_support.cc
vendored
Normal file
|
|
@@ -0,0 +1,194 @@
|
|||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Allow dynamic symbol lookup in the kernel VDSO page.
|
||||
//
|
||||
// VDSOSupport -- a class representing kernel VDSO (if present).
|
||||
|
||||
#include "absl/debugging/internal/vdso_support.h"
|
||||
|
||||
#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval.
|
||||
#include <sys/auxv.h>
|
||||
#endif
|
||||
|
||||
#include "absl/base/dynamic_annotations.h"
|
||||
#include "absl/base/internal/raw_logging.h"
|
||||
#include "absl/base/port.h"
|
||||
|
||||
#ifndef AT_SYSINFO_EHDR
|
||||
#define AT_SYSINFO_EHDR 33 // for crosstoolv10
|
||||
#endif
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
ABSL_CONST_INIT
|
||||
std::atomic<const void *> VDSOSupport::vdso_base_(
|
||||
debugging_internal::ElfMemImage::kInvalidBase);
|
||||
|
||||
std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
|
||||
VDSOSupport::VDSOSupport()
|
||||
// If vdso_base_ is still set to kInvalidBase, we got here
|
||||
// before VDSOSupport::Init has been called. Call it now.
|
||||
: image_(vdso_base_.load(std::memory_order_relaxed) ==
|
||||
debugging_internal::ElfMemImage::kInvalidBase
|
||||
? Init()
|
||||
: vdso_base_.load(std::memory_order_relaxed)) {}
|
||||
|
||||
// NOTE: we can't use GoogleOnceInit() below, because we can be
|
||||
// called by tcmalloc, and none of the *once* stuff may be functional yet.
|
||||
//
|
||||
// In addition, we hope that the VDSOSupportHelper constructor
|
||||
// causes this code to run before there are any threads, and before
|
||||
// InitGoogle() has executed any chroot or setuid calls.
|
||||
//
|
||||
// Finally, even if there is a race here, it is harmless, because
|
||||
// the operation should be idempotent.
|
||||
const void *VDSOSupport::Init() {
|
||||
const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
|
||||
#if __GLIBC_PREREQ(2, 16)
|
||||
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
|
||||
errno = 0;
|
||||
const void *const sysinfo_ehdr =
|
||||
reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
|
||||
if (errno == 0) {
|
||||
vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
#endif // __GLIBC_PREREQ(2, 16)
|
||||
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
|
||||
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
|
||||
// on stack, and so glibc works as if VDSO was not present.
|
||||
// But going directly to kernel via /proc/self/auxv below bypasses
|
||||
// Valgrind zapping. So we check for Valgrind separately.
|
||||
if (RunningOnValgrind()) {
|
||||
vdso_base_.store(nullptr, std::memory_order_relaxed);
|
||||
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
|
||||
return nullptr;
|
||||
}
|
||||
int fd = open("/proc/self/auxv", O_RDONLY);
|
||||
if (fd == -1) {
|
||||
// Kernel too old to have a VDSO.
|
||||
vdso_base_.store(nullptr, std::memory_order_relaxed);
|
||||
getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
|
||||
return nullptr;
|
||||
}
|
||||
ElfW(auxv_t) aux;
|
||||
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
|
||||
if (aux.a_type == AT_SYSINFO_EHDR) {
|
||||
vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
|
||||
std::memory_order_relaxed);
|
||||
break;
|
||||
}
|
||||
}
|
||||
close(fd);
|
||||
if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
|
||||
// Didn't find AT_SYSINFO_EHDR in auxv[].
|
||||
vdso_base_.store(nullptr, std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
GetCpuFn fn = &GetCPUViaSyscall; // default if VDSO not present.
|
||||
if (vdso_base_.load(std::memory_order_relaxed)) {
|
||||
VDSOSupport vdso;
|
||||
SymbolInfo info;
|
||||
if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
|
||||
fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
|
||||
}
|
||||
}
|
||||
// Subtle: this code runs outside of any locks; prevent compiler
|
||||
// from assigning to getcpu_fn_ more than once.
|
||||
getcpu_fn_.store(fn, std::memory_order_relaxed);
|
||||
return vdso_base_.load(std::memory_order_relaxed);
|
||||
}
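// Example (sketch): on glibc >= 2.16 the discovery above reduces to a single
// getauxval() probe; the /proc/self/auxv path only matters for older libcs
// and for Valgrind, as described in the comments:
//
//   #include <sys/auxv.h>
//   const void *vdso_ehdr =
//       reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
//   // A null result means no VDSO was reported (or getauxval failed).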
|
||||
|
||||
const void *VDSOSupport::SetBase(const void *base) {
|
||||
ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
|
||||
"internal error");
|
||||
const void *old_base = vdso_base_.load(std::memory_order_relaxed);
|
||||
vdso_base_.store(base, std::memory_order_relaxed);
|
||||
image_.Init(base);
|
||||
// Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
|
||||
getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
|
||||
return old_base;
|
||||
}
|
||||
|
||||
bool VDSOSupport::LookupSymbol(const char *name,
|
||||
const char *version,
|
||||
int type,
|
||||
SymbolInfo *info) const {
|
||||
return image_.LookupSymbol(name, version, type, info);
|
||||
}
|
||||
|
||||
bool VDSOSupport::LookupSymbolByAddress(const void *address,
|
||||
SymbolInfo *info_out) const {
|
||||
return image_.LookupSymbolByAddress(address, info_out);
|
||||
}
|
||||
|
||||
// NOLINT on 'long' because this routine mimics kernel api.
|
||||
long VDSOSupport::GetCPUViaSyscall(unsigned *cpu, // NOLINT(runtime/int)
|
||||
void *, void *) {
|
||||
#ifdef SYS_getcpu
|
||||
return syscall(SYS_getcpu, cpu, nullptr, nullptr);
|
||||
#else
|
||||
// x86_64 never implemented sys_getcpu(), except as a VDSO call.
|
||||
static_cast<void>(cpu); // Avoid an unused argument compiler warning.
|
||||
errno = ENOSYS;
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Use fast __vdso_getcpu if available.
|
||||
long VDSOSupport::InitAndGetCPU(unsigned *cpu, // NOLINT(runtime/int)
|
||||
void *x, void *y) {
|
||||
Init();
|
||||
GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
|
||||
ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
|
||||
return (*fn)(cpu, x, y);
|
||||
}
|
||||
|
||||
// This function must be very fast, and may be called from very
|
||||
// low level (e.g. tcmalloc). Hence I avoid things like
|
||||
// GoogleOnceInit() and ::operator new.
|
||||
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
|
||||
int GetCPU() {
|
||||
unsigned cpu;
|
||||
int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
|
||||
return ret_code == 0 ? cpu : ret_code;
|
||||
}
|
||||
|
||||
// We need to make sure VDSOSupport::Init() is called before
|
||||
// InitGoogle() does any setuid or chroot calls. If VDSOSupport
|
||||
// is used in any global constructor, this will happen, since
|
||||
// VDSOSupport's constructor calls Init. But if not, we need to
|
||||
// ensure it here, with a global constructor of our own. This
|
||||
// is an allowed exception to the normal rule against non-trivial
|
||||
// global constructors.
|
||||
static class VDSOInitHelper {
|
||||
public:
|
||||
VDSOInitHelper() { VDSOSupport::Init(); }
|
||||
} vdso_init_helper;
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_HAVE_VDSO_SUPPORT
|
||||
158
third_party/abseil_cpp/absl/debugging/internal/vdso_support.h
vendored
Normal file
|
|
@@ -0,0 +1,158 @@
|
|||
//
|
||||
// Copyright 2017 The Abseil Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
// Allow dynamic symbol lookup in the kernel VDSO page.
|
||||
//
|
||||
// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
|
||||
// executable code, which looks like a shared library, but doesn't
|
||||
// necessarily exist anywhere on disk, and which gets mmap()ed into
|
||||
// every process by kernels which support VDSO, such as 2.6.x for 32-bit
|
||||
// executables, and 2.6.24 and above for 64-bit executables.
|
||||
//
|
||||
// More details could be found here:
|
||||
// http://www.trilithium.com/johan/2005/08/linux-gate/
|
||||
//
|
||||
// VDSOSupport -- a class representing kernel VDSO (if present).
|
||||
//
|
||||
// Example usage:
|
||||
// VDSOSupport vdso;
|
||||
// VDSOSupport::SymbolInfo info;
|
||||
// typedef long (*FN)(unsigned *, void *, void *);
|
||||
// FN fn = nullptr;
|
||||
// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
|
||||
// fn = reinterpret_cast<FN>(info.address);
|
||||
// }
|
||||
|
||||
#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
|
||||
#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "absl/base/attributes.h"
|
||||
#include "absl/debugging/internal/elf_mem_image.h"
|
||||
|
||||
#ifdef ABSL_HAVE_ELF_MEM_IMAGE
|
||||
|
||||
#ifdef ABSL_HAVE_VDSO_SUPPORT
|
||||
#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set
|
||||
#else
|
||||
#define ABSL_HAVE_VDSO_SUPPORT 1
|
||||
#endif
|
||||
|
||||
namespace absl {
|
||||
ABSL_NAMESPACE_BEGIN
|
||||
namespace debugging_internal {
|
||||
|
||||
// NOTE: this class may be used from within tcmalloc, and cannot
|
||||
// use any memory allocation routines.
|
||||
class VDSOSupport {
|
||||
public:
|
||||
VDSOSupport();
|
||||
|
||||
typedef ElfMemImage::SymbolInfo SymbolInfo;
|
||||
typedef ElfMemImage::SymbolIterator SymbolIterator;
|
||||
|
||||
// On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
|
||||
// depending on how the kernel is built. The kernel is normally built with
|
||||
// STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
|
||||
// compile-time constant.
|
||||
#ifdef __powerpc64__
|
||||
enum { kVDSOSymbolType = STT_NOTYPE };
|
||||
#else
|
||||
enum { kVDSOSymbolType = STT_FUNC };
|
||||
#endif
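// Example (sketch): using kVDSOSymbolType keeps lookups portable across
// powerpc64 and other architectures (the symbol and version names here are
// only illustrative):
//
//   VDSOSupport vdso;
//   VDSOSupport::SymbolInfo info;
//   if (vdso.LookupSymbol("__kernel_clock_gettime", "LINUX_2.6.15",
//                         VDSOSupport::kVDSOSymbolType, &info)) {
//     // info.address points at the VDSO implementation.
//   }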
|
||||
|
||||
// Answers whether we have a vdso at all.
|
||||
bool IsPresent() const { return image_.IsPresent(); }
|
||||
|
||||
// Allows iteration over all VDSO symbols.
|
||||
SymbolIterator begin() const { return image_.begin(); }
|
||||
SymbolIterator end() const { return image_.end(); }
|
||||
|
||||
// Look up versioned dynamic symbol in the kernel VDSO.
|
||||
// Returns false if VDSO is not present, or doesn't contain given
|
||||
// symbol/version/type combination.
|
||||
// If info_out != nullptr, additional details are filled in.
|
||||
bool LookupSymbol(const char *name, const char *version,
|
||||
int symbol_type, SymbolInfo *info_out) const;
|
||||
|
||||
// Find info about symbol (if any) which overlaps given address.
|
||||
// Returns true if symbol was found; false if VDSO isn't present
|
||||
// or doesn't have a symbol overlapping given address.
|
||||
// If info_out != nullptr, additional details are filled in.
|
||||
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
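// Example usage (sketch; pc is a hypothetical program counter):
//
//   VDSOSupport vdso;
//   VDSOSupport::SymbolInfo info;
//   if (vdso.IsPresent() && vdso.LookupSymbolByAddress(pc, &info)) {
//     // info.name is the VDSO symbol covering pc, e.g. "__vdso_getcpu".
//   }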
|
||||
|
||||
// Used only for testing. Replace real VDSO base with a mock.
|
||||
// Returns previous value of vdso_base_. After you are done testing,
|
||||
// you are expected to call SetBase() with previous value, in order to
|
||||
// reset state to the way it was.
|
||||
const void *SetBase(const void *s);
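// Example usage in a test (sketch; fake_vdso_image is a hypothetical mock
// ELF image):
//
//   VDSOSupport vdso;
//   const void *old_base = vdso.SetBase(fake_vdso_image);
//   ... exercise code against the mock VDSO ...
//   vdso.SetBase(old_base);  // restore the real VDSO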
|
||||
|
||||
// Computes vdso_base_ and returns it. Should be called as early as
|
||||
// possible; before any thread creation, chroot or setuid.
|
||||
static const void *Init();
|
||||
|
||||
private:
|
||||
// image_ represents VDSO ELF image in memory.
|
||||
// image_.ehdr_ == nullptr implies there is no VDSO.
|
||||
ElfMemImage image_;
|
||||
|
||||
// Cached value of auxv AT_SYSINFO_EHDR, computed once.
|
||||
// This is a tri-state:
|
||||
// kInvalidBase => value hasn't been determined yet.
|
||||
// 0 => there is no VDSO.
|
||||
// else => vma of VDSO Elf{32,64}_Ehdr.
|
||||
//
|
||||
// When testing with mock VDSO, low bit is set.
|
||||
// The low bit is always available because vdso_base_ is
|
||||
// page-aligned.
|
||||
static std::atomic<const void *> vdso_base_;
|
||||
|
||||
// NOLINT on 'long' because these routines mimic kernel api.
|
||||
// The 'cache' parameter may be used by some versions of the kernel,
|
||||
// and should be nullptr or point to a static buffer containing at
|
||||
// least two 'long's.
|
||||
static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'.
|
||||
void *unused);
|
||||
static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'.
|
||||
void *unused);
|
||||
typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'.
|
||||
void *unused);
|
||||
|
||||
// This function pointer may point to InitAndGetCPU,
|
||||
// GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
|
||||
ABSL_CONST_INIT static std::atomic<GetCpuFn> getcpu_fn_;
|
||||
|
||||
friend int GetCPU(void); // Needs access to getcpu_fn_.
|
||||
|
||||
VDSOSupport(const VDSOSupport&) = delete;
|
||||
VDSOSupport& operator=(const VDSOSupport&) = delete;
|
||||
};
|
||||
|
||||
// Same as sched_getcpu() on later glibc versions.
|
||||
// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
|
||||
// otherwise use syscall(SYS_getcpu,...).
|
||||
// May return -1 with errno == ENOSYS if the kernel doesn't
|
||||
// support SYS_getcpu.
|
||||
int GetCPU();
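// Example usage (sketch):
//
//   int cpu = absl::debugging_internal::GetCPU();
//   if (cpu < 0) {
//     // Kernel without SYS_getcpu support (errno == ENOSYS); fall back to
//     // sched_getcpu() or treat the CPU as unknown.
//   }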
|
||||
|
||||
} // namespace debugging_internal
|
||||
ABSL_NAMESPACE_END
|
||||
} // namespace absl
|
||||
|
||||
#endif // ABSL_HAVE_ELF_MEM_IMAGE
|
||||
|
||||
#endif // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
|
||||