merge(3p/absl): subtree merge of Abseil up to e19260f

... notably, this includes Abseil's own StatusOr type, which
conflicted with our existing implementation (taken from TensorFlow); a brief
usage sketch follows the change summary below.

Change-Id: Ie7d6764b64055caaeb8dc7b6b9d066291e6b538f
Vincent Ambo 2020-11-21 14:43:54 +01:00
parent cc27324d02
commit 082c006c04
854 changed files with 11260 additions and 5296 deletions
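For context, a minimal sketch of the absl::StatusOr usage pattern provided by the merged type (illustrative only; ParseNumber and Demo are hypothetical functions, not part of this change):

#include <cstdio>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"

// Hypothetical helper, used only to illustrate the API surface.
absl::StatusOr<int> ParseNumber(absl::string_view s) {
  int value = 0;
  if (!absl::SimpleAtoi(s, &value)) {
    return absl::InvalidArgumentError("not a number");
  }
  return value;  // implicit conversion into the ok state
}

void Demo() {
  absl::StatusOr<int> n = ParseNumber("42");
  if (n.ok()) {
    std::printf("parsed %d\n", *n);  // *n (or n.value()) accesses the payload
  } else {
    std::printf("error: %s\n", n.status().ToString().c_str());
  }
}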

View file

@@ -0,0 +1,154 @@
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
#include "absl/base/config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#include <unistd.h>
#endif
#ifdef __linux__
#include <linux/futex.h>
#include <sys/syscall.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <atomic>
#include <cstdint>
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
#ifdef ABSL_INTERNAL_HAVE_FUTEX
#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
#elif defined(__BIONIC__)
// Bionic supports all the futex operations we need even when some of the futex
// definitions are missing.
#define ABSL_INTERNAL_HAVE_FUTEX
#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
#define ABSL_INTERNAL_HAVE_FUTEX
#endif
#ifdef ABSL_INTERNAL_HAVE_FUTEX
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// Some Android headers are missing these definitions even though they
// support these futex operations.
#ifdef __BIONIC__
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#ifndef FUTEX_WAIT_BITSET
#define FUTEX_WAIT_BITSET 9
#endif
#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif
#ifndef FUTEX_CLOCK_REALTIME
#define FUTEX_CLOCK_REALTIME 256
#endif
#ifndef FUTEX_BITSET_MATCH_ANY
#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
#endif
#endif
#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
#define SYS_futex_time64 __NR_futex_time64
#endif
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
#endif
class FutexImpl {
public:
static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
KernelTimeout t) {
int err = 0;
if (t.has_timeout()) {
// https://locklessinc.com/articles/futex_cheat_sheet/
// Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
struct timespec abs_timeout = t.MakeAbsTimespec();
// Atomically check that the futex value is still 0, and if it
// is, sleep until abs_timeout or until woken by FUTEX_WAKE.
err = syscall(
SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
&abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
} else {
// Atomically check that the futex value is still 0, and if it
// is, sleep until woken by FUTEX_WAKE.
err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
}
if (ABSL_PREDICT_FALSE(err != 0)) {
err = -errno;
}
return err;
}
static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
int32_t bits,
const struct timespec *abstime) {
int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
nullptr, bits);
if (ABSL_PREDICT_FALSE(err != 0)) {
err = -errno;
}
return err;
}
static int Wake(std::atomic<int32_t> *v, int32_t count) {
int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
err = -errno;
}
return err;
}
// FUTEX_WAKE_BITSET
static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
nullptr, bits);
if (ABSL_PREDICT_FALSE(err < 0)) {
err = -errno;
}
return err;
}
};
class Futex : public FutexImpl {};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_INTERNAL_HAVE_FUTEX
#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
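As a rough usage sketch (an assumption about how callers inside Abseil drive this wrapper, not code from this commit), the waiter/waker protocol looks like this with the futex word starting at 0:

#include <atomic>
#include <cstdint>
#include "absl/synchronization/internal/futex.h"
#include "absl/synchronization/internal/kernel_timeout.h"

namespace sync = absl::synchronization_internal;

std::atomic<int32_t> futex_word{0};

// Waiter side: sleep in the kernel while the word is still 0.
void BlockUntilWoken() {
  while (futex_word.load(std::memory_order_acquire) == 0) {
    // Returns 0 on wake-up or a negative errno (e.g. -EINTR, -EAGAIN);
    // either way the loop re-checks the word.
    sync::Futex::WaitUntil(&futex_word, /*val=*/0,
                           sync::KernelTimeout::Never());
  }
}

// Waker side: publish the state change, then wake one waiter.
void WakeOneWaiter() {
  futex_word.store(1, std::memory_order_release);
  sync::Futex::Wake(&futex_word, 1);
}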

View file

@@ -37,6 +37,7 @@
#include <algorithm>
#include <array>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

View file

@@ -26,6 +26,7 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#include <time.h>
#include <algorithm>
#include <limits>
@@ -57,6 +58,10 @@ class KernelTimeout {
bool has_timeout() const { return ns_ != 0; }
// Convert to parameter for sem_timedwait/futex/similar. Only for approved
// users. Do not call if !has_timeout.
struct timespec MakeAbsTimespec();
private:
// internal rep, not user visible: ns after unix epoch.
// zero = no timeout.
@@ -82,34 +87,6 @@ class KernelTimeout {
return x;
}
// Convert to parameter for sem_timedwait/futex/similar. Only for approved
// users. Do not call if !has_timeout.
struct timespec MakeAbsTimespec() {
int64_t n = ns_;
static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
if (n == 0) {
ABSL_RAW_LOG(
ERROR,
"Tried to create a timespec from a non-timeout; never do this.");
// But we'll try to continue sanely. no-timeout ~= saturated timeout.
n = (std::numeric_limits<int64_t>::max)();
}
// Kernel APIs validate timespecs as being at or after the epoch,
// despite the kernel time type being signed. However, no one can
// tell the difference between a timeout at or before the epoch (since
// all such timeouts have expired!)
if (n < 0) n = 0;
struct timespec abstime;
int64_t seconds = (std::min)(n / kNanosPerSecond,
int64_t{(std::numeric_limits<time_t>::max)()});
abstime.tv_sec = static_cast<time_t>(seconds);
abstime.tv_nsec =
static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
return abstime;
}
#ifdef _WIN32
// Converts to milliseconds from now, or INFINITE when
// !has_timeout(). For use by SleepConditionVariableSRW on
@@ -148,6 +125,30 @@ class KernelTimeout {
friend class Waiter;
};
inline struct timespec KernelTimeout::MakeAbsTimespec() {
int64_t n = ns_;
static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
if (n == 0) {
ABSL_RAW_LOG(
ERROR, "Tried to create a timespec from a non-timeout; never do this.");
// But we'll try to continue sanely. no-timeout ~= saturated timeout.
n = (std::numeric_limits<int64_t>::max)();
}
// Kernel APIs validate timespecs as being at or after the epoch,
// despite the kernel time type being signed. However, no one can
// tell the difference between a timeout at or before the epoch (since
// all such timeouts have expired!)
if (n < 0) n = 0;
struct timespec abstime;
int64_t seconds = (std::min)(n / kNanosPerSecond,
int64_t{(std::numeric_limits<time_t>::max)()});
abstime.tv_sec = static_cast<time_t>(seconds);
abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
return abstime;
}
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
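A minimal sketch of the call pattern the futex code relies on (assumed example; the 50 ms deadline is arbitrary): a KernelTimeout built from an absolute absl::Time is converted with MakeAbsTimespec() into the absolute CLOCK_REALTIME timespec that FUTEX_WAIT_BITSET expects.

#include <ctime>
#include "absl/synchronization/internal/kernel_timeout.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

void MakeAbsTimespecSketch() {
  // Absolute deadline 50 ms from now (arbitrary value for illustration).
  absl::synchronization_internal::KernelTimeout t(absl::Now() +
                                                  absl::Milliseconds(50));
  if (t.has_timeout()) {
    // Absolute realtime-clock timespec, as passed through Futex::WaitUntil().
    struct timespec abs_timeout = t.MakeAbsTimespec();
    (void)abs_timeout;
  }
}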

View file

@@ -1,324 +0,0 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Implementation of a small subset of Mutex and CondVar functionality
// for platforms where the production implementation hasn't been fully
// ported yet.
#include "absl/synchronization/mutex.h"
#if defined(_WIN32)
#include <chrono> // NOLINT(build/c++11)
#else
#include <sys/time.h>
#include <time.h>
#endif
#include <algorithm>
#include "absl/base/internal/raw_logging.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {}
void EnableMutexInvariantDebugging(bool) {}
namespace synchronization_internal {
namespace {
// Return the current time plus the timeout.
absl::Time DeadlineFromTimeout(absl::Duration timeout) {
return absl::Now() + timeout;
}
// Limit the deadline to a positive, 32-bit time_t value to accommodate
// implementation restrictions. This also deals with InfinitePast and
// InfiniteFuture.
absl::Time LimitedDeadline(absl::Time deadline) {
deadline = std::max(absl::FromTimeT(0), deadline);
deadline = std::min(deadline, absl::FromTimeT(0x7fffffff));
return deadline;
}
} // namespace
#if defined(_WIN32)
MutexImpl::MutexImpl() {}
MutexImpl::~MutexImpl() {
if (locked_) {
std_mutex_.unlock();
}
}
void MutexImpl::Lock() {
std_mutex_.lock();
locked_ = true;
}
bool MutexImpl::TryLock() {
bool locked = std_mutex_.try_lock();
if (locked) locked_ = true;
return locked;
}
void MutexImpl::Unlock() {
locked_ = false;
released_.SignalAll();
std_mutex_.unlock();
}
CondVarImpl::CondVarImpl() {}
CondVarImpl::~CondVarImpl() {}
void CondVarImpl::Signal() { std_cv_.notify_one(); }
void CondVarImpl::SignalAll() { std_cv_.notify_all(); }
void CondVarImpl::Wait(MutexImpl* mu) {
mu->released_.SignalAll();
std_cv_.wait(mu->std_mutex_);
}
bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
mu->released_.SignalAll();
time_t when = ToTimeT(deadline);
int64_t nanos = ToInt64Nanoseconds(deadline - absl::FromTimeT(when));
std::chrono::system_clock::time_point deadline_tp =
std::chrono::system_clock::from_time_t(when) +
std::chrono::duration_cast<std::chrono::system_clock::duration>(
std::chrono::nanoseconds(nanos));
auto deadline_since_epoch =
std::chrono::duration_cast<std::chrono::duration<double>>(
deadline_tp - std::chrono::system_clock::from_time_t(0));
return std_cv_.wait_until(mu->std_mutex_, deadline_tp) ==
std::cv_status::timeout;
}
#else // ! _WIN32
MutexImpl::MutexImpl() {
ABSL_RAW_CHECK(pthread_mutex_init(&pthread_mutex_, nullptr) == 0,
"pthread error");
}
MutexImpl::~MutexImpl() {
if (locked_) {
ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
}
ABSL_RAW_CHECK(pthread_mutex_destroy(&pthread_mutex_) == 0, "pthread error");
}
void MutexImpl::Lock() {
ABSL_RAW_CHECK(pthread_mutex_lock(&pthread_mutex_) == 0, "pthread error");
locked_ = true;
}
bool MutexImpl::TryLock() {
bool locked = (0 == pthread_mutex_trylock(&pthread_mutex_));
if (locked) locked_ = true;
return locked;
}
void MutexImpl::Unlock() {
locked_ = false;
released_.SignalAll();
ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
}
CondVarImpl::CondVarImpl() {
ABSL_RAW_CHECK(pthread_cond_init(&pthread_cv_, nullptr) == 0,
"pthread error");
}
CondVarImpl::~CondVarImpl() {
ABSL_RAW_CHECK(pthread_cond_destroy(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::Signal() {
ABSL_RAW_CHECK(pthread_cond_signal(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::SignalAll() {
ABSL_RAW_CHECK(pthread_cond_broadcast(&pthread_cv_) == 0, "pthread error");
}
void CondVarImpl::Wait(MutexImpl* mu) {
mu->released_.SignalAll();
ABSL_RAW_CHECK(pthread_cond_wait(&pthread_cv_, &mu->pthread_mutex_) == 0,
"pthread error");
}
bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
mu->released_.SignalAll();
struct timespec ts = ToTimespec(deadline);
int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts);
if (rc == ETIMEDOUT) return true;
ABSL_RAW_CHECK(rc == 0, "pthread error");
return false;
}
#endif // ! _WIN32
void MutexImpl::Await(const Condition& cond) {
if (cond.Eval()) return;
released_.SignalAll();
do {
released_.Wait(this);
} while (!cond.Eval());
}
bool MutexImpl::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
if (cond.Eval()) return true;
released_.SignalAll();
while (true) {
if (released_.WaitWithDeadline(this, deadline)) return false;
if (cond.Eval()) return true;
}
}
} // namespace synchronization_internal
Mutex::Mutex() {}
Mutex::~Mutex() {}
void Mutex::Lock() { impl()->Lock(); }
void Mutex::Unlock() { impl()->Unlock(); }
bool Mutex::TryLock() { return impl()->TryLock(); }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
void Mutex::Await(const Condition& cond) { impl()->Await(cond); }
void Mutex::LockWhen(const Condition& cond) {
Lock();
Await(cond);
}
bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
return impl()->AwaitWithDeadline(
cond, synchronization_internal::LimitedDeadline(deadline));
}
bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
return AwaitWithDeadline(
cond, synchronization_internal::DeadlineFromTimeout(timeout));
}
bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
Lock();
return AwaitWithDeadline(cond, deadline);
}
bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
return LockWhenWithDeadline(
cond, synchronization_internal::DeadlineFromTimeout(timeout));
}
void Mutex::ReaderLockWhen(const Condition& cond) {
ReaderLock();
Await(cond);
}
bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) {
return LockWhenWithTimeout(cond, timeout);
}
bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
absl::Time deadline) {
return LockWhenWithDeadline(cond, deadline);
}
void Mutex::EnableDebugLog(const char*) {}
void Mutex::EnableInvariantDebugging(void (*)(void*), void*) {}
void Mutex::ForgetDeadlockInfo() {}
void Mutex::AssertHeld() const {}
void Mutex::AssertReaderHeld() const {}
void Mutex::AssertNotHeld() const {}
CondVar::CondVar() {}
CondVar::~CondVar() {}
void CondVar::Signal() { impl()->Signal(); }
void CondVar::SignalAll() { impl()->SignalAll(); }
void CondVar::Wait(Mutex* mu) { return impl()->Wait(mu->impl()); }
bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
return impl()->WaitWithDeadline(
mu->impl(), synchronization_internal::LimitedDeadline(deadline));
}
bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
return WaitWithDeadline(mu, absl::Now() + timeout);
}
void CondVar::EnableDebugLog(const char*) {}
#ifdef THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
#endif
// A function that just returns its argument, dereferenced
static bool Dereference(void *arg) {
// ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg);
return *(static_cast<bool *>(arg));
}
Condition::Condition() {} // null constructor, used for kTrue only
const Condition Condition::kTrue;
Condition::Condition(bool (*func)(void *), void *arg)
: eval_(&CallVoidPtrFunction),
function_(func),
method_(nullptr),
arg_(arg) {}
bool Condition::CallVoidPtrFunction(const Condition *c) {
return (*c->function_)(c->arg_);
}
Condition::Condition(const bool *cond)
: eval_(CallVoidPtrFunction),
function_(Dereference),
method_(nullptr),
// const_cast is safe since Dereference does not modify arg
arg_(const_cast<bool *>(cond)) {}
bool Condition::Eval() const {
// eval_ == null for kTrue
return (this->eval_ == nullptr) || (*this->eval_)(this);
}
void RegisterSymbolizer(bool (*)(const void*, char*, int)) {}
ABSL_NAMESPACE_END
} // namespace absl
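To make the Condition(const bool*) path above concrete, a short sketch of the typical caller pattern (assumed usage, not part of this diff):

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
bool ready = false;  // guarded by mu

void Producer() {
  absl::MutexLock lock(&mu);
  ready = true;  // waiters blocked in LockWhen() re-evaluate the condition
}

void Consumer() {
  // Condition(&ready) wraps the bool* in the Dereference() shim shown above.
  mu.LockWhen(absl::Condition(&ready));
  // ... critical section with ready == true ...
  mu.Unlock();
}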

View file

@@ -1,249 +0,0 @@
// Do not include. This is an implementation detail of base/mutex.h.
//
// Declares three classes:
//
// base::internal::MutexImpl - implementation helper for Mutex
// base::internal::CondVarImpl - implementation helper for CondVar
// base::internal::SynchronizationStorage<T> - implementation helper for
// Mutex, CondVar
#include <type_traits>
#if defined(_WIN32)
#include <condition_variable>
#include <mutex>
#else
#include <pthread.h>
#endif
#include "absl/base/call_once.h"
#include "absl/time/time.h"
// Declare that Mutex::ReaderLock is actually Lock(). Intended primarily
// for tests, and even then as a last resort.
#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set
#else
#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1
#endif
// Declare that Mutex::EnableInvariantDebugging is not implemented.
// Intended primarily for tests, and even then as a last resort.
#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED
#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set
#else
#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
class Condition;
namespace synchronization_internal {
class MutexImpl;
// Do not use this implementation detail of CondVar. Provides most of the
// implementation, but should not be placed directly in static storage
// because it will not linker initialize properly. See
// SynchronizationStorage<T> below for what we mean by linker
// initialization.
class CondVarImpl {
public:
CondVarImpl();
CondVarImpl(const CondVarImpl&) = delete;
CondVarImpl& operator=(const CondVarImpl&) = delete;
~CondVarImpl();
void Signal();
void SignalAll();
void Wait(MutexImpl* mutex);
bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline);
private:
#if defined(_WIN32)
std::condition_variable_any std_cv_;
#else
pthread_cond_t pthread_cv_;
#endif
};
// Do not use this implementation detail of Mutex. Provides most of the
// implementation, but should not be placed directly in static storage
// because it will not linker initialize properly. See
// SynchronizationStorage<T> below for what we mean by linker
// initialization.
class MutexImpl {
public:
MutexImpl();
MutexImpl(const MutexImpl&) = delete;
MutexImpl& operator=(const MutexImpl&) = delete;
~MutexImpl();
void Lock();
bool TryLock();
void Unlock();
void Await(const Condition& cond);
bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
private:
friend class CondVarImpl;
#if defined(_WIN32)
std::mutex std_mutex_;
#else
pthread_mutex_t pthread_mutex_;
#endif
// True if the underlying mutex is locked. If the destructor is entered
// while locked_, the underlying mutex is unlocked. Mutex supports
// destruction while locked, but the same is undefined behavior for both
// pthread_mutex_t and std::mutex.
bool locked_ = false;
// Signaled before releasing the lock, in support of Await.
CondVarImpl released_;
};
// Do not use this implementation detail of CondVar and Mutex. A storage
// space for T that supports a LinkerInitialized constructor. T must
// have a default constructor, which is called by the first call to
// get(). T's destructor is never called if the LinkerInitialized
// constructor is called.
//
// Objects constructed with the default constructor are constructed and
// destructed like any other object, and should never be allocated in
// static storage.
//
// Objects constructed with the LinkerInitialized constructor should
// always be in static storage. For such objects, calls to get() are always
// valid, except from signal handlers.
//
// Note that this implementation relies on undefined language behavior that
// is known to hold for the set of supported compilers. An analysis
// follows.
//
// From the C++11 standard:
//
// [basic.life] says an object has non-trivial initialization if it is of
// class type and it is initialized by a constructor other than a trivial
// default constructor. (the LinkerInitialized constructor is
// non-trivial)
//
// [basic.life] says the lifetime of an object with a non-trivial
// constructor begins when the call to the constructor is complete.
//
// [basic.life] says the lifetime of an object with non-trivial destructor
// ends when the call to the destructor begins.
//
// [basic.life] p5 specifies undefined behavior when accessing non-static
// members of an instance outside its
// lifetime. (SynchronizationStorage::get() accesses non-static members.)
//
// So, LinkerInitialized object of SynchronizationStorage uses a
// non-trivial constructor, which is called at some point during dynamic
// initialization, and is therefore subject to order of dynamic
// initialization bugs, where get() is called before the object's
// constructor is, resulting in undefined behavior.
//
// Similarly, a LinkerInitialized SynchronizationStorage object has a
// non-trivial destructor, and so its lifetime ends at some point during
// destruction of objects with static storage duration [basic.start.term]
// p4. There is a window where other exit code could call get() after this
// occurs, resulting in undefined behavior.
//
// Combined, these statements imply that LinkerInitialized instances
// of SynchronizationStorage<T> rely on undefined behavior.
//
// However, in practice, the implementation works on all supported
// compilers. Specifically, we rely on:
//
// a) zero-initialization being sufficient to initialize
// LinkerInitialized instances for the purposes of calling
// get(), regardless of when the constructor is called. This is
// because the is_dynamic_ boolean is correctly zero-initialized to
// false.
//
// b) the LinkerInitialized constructor is a NOP, and so is immaterial
// even to concurrent calls to get().
//
// c) the destructor being a NOP for LinkerInitialized objects
// (guaranteed by a check for !is_dynamic_), and so any concurrent and
// subsequent calls to get() functioning as if the destructor were not
// called, by virtue of the instances' storage remaining valid after the
// destructor runs.
//
// d) That a-c apply transitively when SynchronizationStorage<T> is the
// only member of a class allocated in static storage.
//
// Nothing in the language standard guarantees that a-d hold. In practice,
// these hold in all supported compilers.
//
// Future direction:
//
// Ideally, we would simply use std::mutex or a similar class, which when
// allocated statically would support use immediately after static
// initialization up until static storage is reclaimed (i.e. the properties
// we require of all "linker initialized" instances).
//
// Regarding construction in static storage, std::mutex is required to
// provide a constexpr default constructor [thread.mutex.class], which
// ensures the instance's lifetime begins with static initialization
// [basic.start.init], and so is immune to any problems caused by the order
// of dynamic initialization. However, as of this writing Microsoft's
// Visual Studio does not provide a constexpr constructor for std::mutex.
// See
// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/
//
// Regarding destruction of instances in static storage, [basic.life] does
// say an object's lifetime ends when the storage it occupies is released,
// in the case of a non-trivial destructor. However, std::mutex is not specified
// to have a trivial destructor.
//
// So, we would need a class with a constexpr default constructor and a
// trivial destructor. Today, we can achieve neither desired property using
// std::mutex directly.
template <typename T>
class SynchronizationStorage {
public:
// Instances allocated on the heap or on the stack should use the default
// constructor.
SynchronizationStorage()
: destruct_(true), once_() {}
constexpr explicit SynchronizationStorage(absl::ConstInitType)
: destruct_(false), once_(), space_{{0}} {}
SynchronizationStorage(SynchronizationStorage&) = delete;
SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
~SynchronizationStorage() {
if (destruct_) {
get()->~T();
}
}
// Retrieve the object in storage. This is fast and thread safe, but does
// incur the cost of absl::call_once().
T* get() {
absl::call_once(once_, SynchronizationStorage::Construct, this);
return reinterpret_cast<T*>(&space_);
}
private:
static void Construct(SynchronizationStorage<T>* self) {
new (&self->space_) T();
}
// When true, T's destructor is run when this is destructed.
const bool destruct_;
absl::once_flag once_;
// An aligned space for the T.
alignas(T) unsigned char space_[sizeof(T)];
};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
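To illustrate the linker-initialization pattern the long comment above describes, a hedged sketch of how a constant-initialized instance of the (now removed) helper would have been declared and used (assumed usage, not code from this commit):

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"

// Constant-initialized: the constexpr ConstInit constructor means get() is
// usable even before (or after) dynamic initialization runs.
ABSL_CONST_INIT static absl::synchronization_internal::SynchronizationStorage<
    absl::synchronization_internal::MutexImpl>
    mutex_storage(absl::kConstInit);

void LockAndUnlock() {
  // The first call to get() lazily constructs the MutexImpl via absl::call_once().
  mutex_storage.get()->Lock();
  mutex_storage.get()->Unlock();
}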

View file

@@ -78,7 +78,7 @@ class PerThreadSem {
// !t.has_timeout() => Wait(t) will return true.
static inline bool Wait(KernelTimeout t);
// White-listed callers.
// Permitted callers.
friend class PerThreadSemTest;
friend class absl::Mutex;
friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();

View file

@@ -23,6 +23,7 @@
#include <thread> // NOLINT(build/c++11)
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/strings/str_cat.h"

View file

@@ -48,6 +48,7 @@
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
@@ -66,71 +67,6 @@ static void MaybeBecomeIdle() {
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
// Some Android headers are missing these definitions even though they
// support these futex operations.
#ifdef __BIONIC__
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#ifndef FUTEX_WAIT_BITSET
#define FUTEX_WAIT_BITSET 9
#endif
#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif
#ifndef FUTEX_CLOCK_REALTIME
#define FUTEX_CLOCK_REALTIME 256
#endif
#ifndef FUTEX_BITSET_MATCH_ANY
#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
#endif
#endif
#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
#define SYS_futex_time64 __NR_futex_time64
#endif
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
#endif
class Futex {
public:
static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
KernelTimeout t) {
int err = 0;
if (t.has_timeout()) {
// https://locklessinc.com/articles/futex_cheat_sheet/
// Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
struct timespec abs_timeout = t.MakeAbsTimespec();
// Atomically check that the futex value is still 0, and if it
// is, sleep until abs_timeout or until woken by FUTEX_WAKE.
err = syscall(
SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
&abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
} else {
// Atomically check that the futex value is still 0, and if it
// is, sleep until woken by FUTEX_WAKE.
err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
}
if (err != 0) {
err = -errno;
}
return err;
}
static int Wake(std::atomic<int32_t> *v, int32_t count) {
int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
err = -errno;
}
return err;
}
};
Waiter::Waiter() {
futex_.store(0, std::memory_order_relaxed);
}

View file

@@ -36,6 +36,7 @@
#include <cstdint>
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/futex.h"
#include "absl/synchronization/internal/kernel_timeout.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
@@ -48,12 +49,7 @@
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
#elif defined(__BIONIC__)
// Bionic supports all the futex operations we need even when some of the futex
// definitions are missing.
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
#elif defined(ABSL_HAVE_SEMAPHORE_H)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
@@ -100,7 +96,7 @@ class Waiter {
}
// How many periods to remain idle before releasing resources
#ifndef THREAD_SANITIZER
#ifndef ABSL_HAVE_THREAD_SANITIZER
static constexpr int kIdlePeriods = 60;
#else
// Memory consumption under ThreadSanitizer is a serious concern,