merge(3p/absl): subtree merge of Abseil up to e19260f

... notably, this includes Abseil's own StatusOr type, which
conflicted with our existing implementation (taken from TensorFlow).

Change-Id: Ie7d6764b64055caaeb8dc7b6b9d066291e6b538f
Author: Vincent Ambo
Date:   2020-11-21 14:43:54 +01:00
Parent: cc27324d02
Commit: 082c006c04
854 changed files with 11260 additions and 5296 deletions
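As context for the StatusOr conflict mentioned above, here is a minimal
sketch of the absl::StatusOr<T> API that the vendored Abseil now provides
(ParsePort is a hypothetical caller, not code from this tree):

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Returns either a parsed port or an error describing why parsing failed.
absl::StatusOr<int> ParsePort(const std::string& s) {
  int port = 0;
  for (char c : s) {
    if (c < '0' || c > '9')
      return absl::InvalidArgumentError("not a number: " + s);
    port = port * 10 + (c - '0');
  }
  return port;
}

int main() {
  absl::StatusOr<int> port = ParsePort("8080");
  if (port.ok()) {
    std::cout << "port: " << *port << "\n";  // *port is the contained value
  } else {
    std::cout << port.status().message() << "\n";  // the error status
  }
}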

File diff suppressed because it is too large.


@@ -23,6 +23,7 @@
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -68,8 +69,21 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
btree_container(const btree_container &other) = default;
btree_container(btree_container &&other) noexcept = default;
explicit btree_container(const allocator_type &alloc)
: tree_(key_compare(), alloc) {}
btree_container(const btree_container &other)
: btree_container(other, absl::allocator_traits<allocator_type>::
select_on_container_copy_construction(
other.get_allocator())) {}
btree_container(const btree_container &other, const allocator_type &alloc)
: tree_(other.tree_, alloc) {}
btree_container(btree_container &&other) noexcept(
std::is_nothrow_move_constructible<Tree>::value) = default;
btree_container(btree_container &&other, const allocator_type &alloc)
: tree_(std::move(other.tree_), alloc) {}
btree_container &operator=(const btree_container &other) = default;
btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
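The rewritten copy constructor above consults
select_on_container_copy_construction rather than blindly copying the source
allocator. A standalone sketch of what that trait decides, assuming C++17 and
std::pmr::polymorphic_allocator as the canonical allocator that overrides it:

#include <memory>
#include <memory_resource>

int main() {
  std::pmr::monotonic_buffer_resource arena;
  std::pmr::polymorphic_allocator<int> a(&arena);
  // polymorphic_allocator overrides select_on_container_copy_construction to
  // return a default-constructed allocator, so a copy-constructed container
  // does not silently share the source container's arena.
  auto b = std::allocator_traits<decltype(a)>::
      select_on_container_copy_construction(a);
  return b == a ? 1 : 0;  // 0: b uses the default resource, not &arena
}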
@@ -90,6 +104,11 @@ class btree_container {
// Lookup routines.
template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
auto equal_range = this->equal_range(key);
return std::distance(equal_range.first, equal_range.second);
}
template <typename K = key_type>
iterator find(const key_arg<K> &key) {
return tree_.find(key);
}
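The count() overload above can now live in the common base class because it
relies only on the identity count(key) == distance(equal_range(key)), which
holds for unique and multi containers alike. A quick illustration of that
identity with a standard container:

#include <cassert>
#include <iterator>
#include <set>

int main() {
  std::multiset<int> m = {1, 2, 2, 2, 3};
  auto range = m.equal_range(2);
  assert(std::distance(range.first, range.second) == 3);  // same as count(2)
  assert(m.count(2) == 3);
}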
@@ -138,6 +157,11 @@ class btree_container {
iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second;
}
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
auto equal_range = this->equal_range(key);
return tree_.erase_range(equal_range.first, equal_range.second).first;
}
// Extract routines.
node_type extract(iterator position) {
@@ -151,7 +175,6 @@ class btree_container {
return extract(iterator(position));
}
public:
// Utility routines.
void clear() { tree_.clear(); }
void swap(btree_container &other) { tree_.swap(other.tree_); }
@@ -235,7 +258,7 @@ class btree_set_container : public btree_container<Tree> {
using super_type::super_type;
btree_set_container() {}
// Range constructor.
// Range constructors.
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -243,18 +266,19 @@ class btree_set_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_set_container(b, e, key_compare(), alloc) {}
// Initializer list constructor.
// Initializer list constructors.
btree_set_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_set_container(init.begin(), init.end(), comp, alloc) {}
// Lookup routines.
template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
return this->tree_.count_unique(key);
}
btree_set_container(std::initializer_list<init_type> init,
const allocator_type &alloc)
: btree_set_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
std::pair<iterator, bool> insert(const value_type &v) {
@@ -268,31 +292,29 @@ class btree_set_container : public btree_container<Tree> {
init_type v(std::forward<Args>(args)...);
return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
iterator insert(const_iterator position, const value_type &v) {
iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v), v)
.insert_hint_unique(iterator(hint), params_type::key(v), v)
.first;
}
iterator insert(const_iterator position, value_type &&v) {
iterator insert(const_iterator hint, value_type &&v) {
return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v),
std::move(v))
.insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename... Args>
iterator emplace_hint(const_iterator position, Args &&... args) {
iterator emplace_hint(const_iterator hint, Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_
.insert_hint_unique(iterator(position), params_type::key(v),
std::move(v))
.insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
this->tree_.insert_iterator_unique(b, e);
this->tree_.insert_iterator_unique(b, e, 0);
}
void insert(std::initializer_list<init_type> init) {
this->tree_.insert_iterator_unique(init.begin(), init.end());
this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
}
insert_return_type insert(node_type &&node) {
if (!node) return {this->end(), false, node_type()};
@@ -315,14 +337,10 @@ class btree_set_container : public btree_container<Tree> {
return res.first;
}
// Deletion routines.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_unique(key);
}
using super_type::erase;
// Node extraction routines.
// TODO(ezb): when the comparator is heterogeneous and has different
// equivalence classes for different lookup types, we should extract the first
// equivalent value if there are multiple.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
auto it = this->find(key);
@@ -344,7 +362,7 @@ class btree_set_container : public btree_container<Tree> {
int> = 0>
void merge(btree_container<T> &src) { // NOLINT
for (auto src_it = src.begin(); src_it != src.end();) {
if (insert(std::move(*src_it)).second) {
if (insert(std::move(params_type::element(src_it.slot()))).second) {
src_it = src.erase(src_it);
} else {
++src_it;
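The loop above moves each element out of the source's slot (so map elements
with const keys can be moved too) and erases it from the source only when the
insert succeeded. The observable contract matches std::set::merge; a usage
sketch with absl::btree_set:

#include <cassert>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> dst = {1, 2, 3};
  absl::btree_set<int> src = {3, 4, 5};
  dst.merge(src);           // moves 4 and 5; 3 collides and is not moved
  assert(dst.size() == 5);  // {1, 2, 3, 4, 5}
  assert(src.size() == 1);  // the duplicate {3} stays behind
}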
@@ -371,6 +389,7 @@ template <typename Tree>
class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type;
friend class BtreeNodePeer;
private:
template <class K>
@@ -392,111 +411,72 @@ class btree_map_container : public btree_set_container<Tree> {
// Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
// Note: when we call `std::forward<M>(obj)` twice, it's safe because
// insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
// `ret.second` is false.
template <class M>
std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
if (!ret.second) ret.first->second = obj;
return ret;
template <typename K = key_type, class M>
std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
const M &obj) {
return insert_or_assign_impl(k, obj);
}
template <class M, key_type * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, std::move(k), obj);
if (!ret.second) ret.first->second = obj;
return ret;
template <typename K = key_type, class M, K * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
return insert_or_assign_impl(std::forward<K>(k), obj);
}
template <class M, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, k, std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
template <typename K = key_type, class M, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
return insert_or_assign_impl(k, std::forward<M>(obj));
}
template <class M, key_type * = nullptr, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
}
template <class M>
iterator insert_or_assign(const_iterator position, const key_type &k,
template <typename K = key_type, class M>
iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
const M &obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_hint_unique(iterator(position), k, k, obj);
if (!ret.second) ret.first->second = obj;
return ret.first;
return insert_or_assign_hint_impl(hint, k, obj);
}
template <class M, key_type * = nullptr>
iterator insert_or_assign(const_iterator position, key_type &&k,
const M &obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(position), k, std::move(k), obj);
if (!ret.second) ret.first->second = obj;
return ret.first;
template <typename K = key_type, class M, K * = nullptr>
iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
}
template <class M, M * = nullptr>
iterator insert_or_assign(const_iterator position, const key_type &k,
M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(position), k, k, std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
template <typename K = key_type, class M, M * = nullptr>
iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
}
template <class M, key_type * = nullptr, M * = nullptr>
iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(position), k, std::move(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
return insert_or_assign_hint_impl(hint, std::forward<K>(k),
std::forward<M>(obj));
}
template <typename... Args>
std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
return this->tree_.insert_unique(
k, std::piecewise_construct, std::forward_as_tuple(k),
std::forward_as_tuple(std::forward<Args>(args)...));
template <typename K = key_type, typename... Args,
typename absl::enable_if_t<
!std::is_convertible<K, const_iterator>::value, int> = 0>
std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
template <typename... Args>
std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
// Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
// and then using `k` unsequenced. This is safe because the move is into a
// forwarding reference and insert_unique guarantees that `key` is never
// referenced after consuming `args`.
const key_type &key_ref = k;
return this->tree_.insert_unique(
key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
template <typename K = key_type, typename... Args,
typename absl::enable_if_t<
!std::is_convertible<K, const_iterator>::value, int> = 0>
std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <typename... Args>
iterator try_emplace(const_iterator hint, const key_type &k,
template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, const key_arg<K> &k,
Args &&... args) {
return this->tree_
.insert_hint_unique(iterator(hint), k, std::piecewise_construct,
std::forward_as_tuple(k),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
}
template <typename... Args>
iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
// Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
// and then using `k` unsequenced. This is safe because the move is into a
// forwarding reference and insert_hint_unique guarantees that `key` is
// never referenced after consuming `args`.
const key_type &key_ref = k;
return this->tree_
.insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
std::forward_as_tuple(std::move(k)),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
return try_emplace_hint_impl(hint, std::forward<K>(k),
std::forward<Args>(args)...);
}
mapped_type &operator[](const key_type &k) {
template <typename K = key_type>
mapped_type &operator[](const key_arg<K> &k) {
return try_emplace(k).first->second;
}
mapped_type &operator[](key_type &&k) {
return try_emplace(std::move(k)).first->second;
template <typename K = key_type>
mapped_type &operator[](key_arg<K> &&k) {
return try_emplace(std::forward<K>(k)).first->second;
}
template <typename K = key_type>
@@ -513,6 +493,40 @@ class btree_map_container : public btree_set_container<Tree> {
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second;
}
private:
// Note: when we call `std::forward<M>(obj)` twice, it's safe because
// insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
// `ret.second` is false.
template <class K, class M>
std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
const std::pair<iterator, bool> ret =
this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret;
}
template <class K, class M>
iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
if (!ret.second) ret.first->second = std::forward<M>(obj);
return ret.first;
}
template <class K, class... Args>
std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
return this->tree_.insert_unique(
k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
}
template <class K, class... Args>
iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
return this->tree_
.insert_hint_unique(iterator(hint), k, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...))
.first;
}
};
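The `typename K = key_type` parameters threaded through insert_or_assign and
try_emplace above enable heterogeneous mutation when the comparator is
transparent. A usage sketch, assuming a transparent std::less<> comparator
(not code from this diff):

#include <string>

#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

int main() {
  // std::less<> is transparent, which is what turns on the K overloads.
  absl::btree_map<std::string, int, std::less<>> m;
  absl::string_view key = "hits";
  m.try_emplace(key, 0);        // looks up by string_view, no temporary string
  m.insert_or_assign(key, 42);  // assigns to the existing entry
  return m.find(key)->second;   // heterogeneous find; returns 42
}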
// A common base class for btree_multiset and btree_multimap.
@@ -540,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
using super_type::super_type;
btree_multiset_container() {}
// Range constructor.
// Range constructors.
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -548,29 +562,30 @@ class btree_multiset_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const allocator_type &alloc)
: btree_multiset_container(b, e, key_compare(), alloc) {}
// Initializer list constructor.
// Initializer list constructors.
btree_multiset_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
// Lookup routines.
template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
return this->tree_.count_multi(key);
}
btree_multiset_container(std::initializer_list<init_type> init,
const allocator_type &alloc)
: btree_multiset_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
iterator insert(value_type &&v) {
return this->tree_.insert_multi(std::move(v));
}
iterator insert(const_iterator position, const value_type &v) {
return this->tree_.insert_hint_multi(iterator(position), v);
iterator insert(const_iterator hint, const value_type &v) {
return this->tree_.insert_hint_multi(iterator(hint), v);
}
iterator insert(const_iterator position, value_type &&v) {
return this->tree_.insert_hint_multi(iterator(position), std::move(v));
iterator insert(const_iterator hint, value_type &&v) {
return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
@@ -584,9 +599,9 @@ class btree_multiset_container : public btree_container<Tree> {
return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
}
template <typename... Args>
iterator emplace_hint(const_iterator position, Args &&... args) {
iterator emplace_hint(const_iterator hint, Args &&... args) {
return this->tree_.insert_hint_multi(
iterator(position), init_type(std::forward<Args>(args)...));
iterator(hint), init_type(std::forward<Args>(args)...));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
@@ -605,14 +620,9 @@ class btree_multiset_container : public btree_container<Tree> {
return res;
}
// Deletion routines.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_multi(key);
}
using super_type::erase;
// Node extraction routines.
// TODO(ezb): we are supposed to extract the first equivalent key if there are
// multiple, but this isn't guaranteed to extract the first one.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
auto it = this->find(key);
@@ -632,8 +642,9 @@ class btree_multiset_container : public btree_container<Tree> {
typename T::params_type::is_map_container>>::value,
int> = 0>
void merge(btree_container<T> &src) { // NOLINT
insert(std::make_move_iterator(src.begin()),
std::make_move_iterator(src.end()));
for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
insert(std::move(params_type::element(src_it.slot())));
}
src.clear();
}


@@ -146,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
constexpr node_handle() {}
auto key() const -> decltype(PolicyTraits::key(std::declval<slot_type*>())) {
return PolicyTraits::key(this->slot());
// When C++17 is available, we can use std::launder to provide mutable
// access to the key. Otherwise, we provide const access.
auto key() const
-> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
return PolicyTraits::mutable_key(this->slot());
}
mapped_type& mapped() const {


@@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
template <int I>
ElemT<I>& get() & {
return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
return StorageT<I>::get();
}
template <int I>


@@ -15,25 +15,27 @@
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -55,8 +57,11 @@ void* Allocate(Alloc* alloc, size_t n) {
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc);
void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
@@ -71,8 +76,11 @@ void Deallocate(Alloc* alloc, void* p, size_t n) {
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc);
AT::deallocate(mem_alloc, static_cast<M*>(p),
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
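Both functions above round the byte count n up to a whole number of
AlignedType<Alignment> objects with the usual ceiling-division idiom; a worked
example, assuming Alignment == 16:

#include <cstddef>

// (n + unit - 1) / unit computes ceil(n / unit) in integer arithmetic.
constexpr std::size_t RoundUpCount(std::size_t n, std::size_t unit) {
  return (n + unit - 1) / unit;
}
static_assert(RoundUpCount(40, 16) == 3, "40 bytes -> 3 16-byte units (48 B)");
static_assert(RoundUpCount(48, 16) == 3, "exact multiples don't over-allocate");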
@@ -209,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
#ifdef MEMORY_SANITIZER
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
@@ -220,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
#ifdef MEMORY_SANITIZER
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
@@ -351,6 +359,20 @@ struct map_slot_policy {
return slot->value;
}
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
static K& mutable_key(slot_type* slot) {
// Still check for kMutableKeys so that we can avoid calling std::launder
// unless necessary because it can interfere with optimizations.
return kMutableKeys::value ? slot->key
: *std::launder(const_cast<K*>(
std::addressof(slot->value.first)));
}
#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
static const K& mutable_key(slot_type* slot) { return key(slot); }
#endif
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}
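mutable_key above leans on std::launder where C++17 provides it. A minimal
sketch of the problem launder solves, separate from this diff: once storage
holding an object with a const member is reused, old names for that object
cannot be used to observe the new one.

#include <new>

struct Slot {
  const int key;
};

int ReuseSlot() {
  Slot s{1};
  Slot* fresh = new (&s) Slot{2};  // reuse the storage in place
  (void)fresh;
  // Reading s.key here could still yield 1 under the optimizer; launder a
  // pointer to the storage to reach the new object instead.
  return std::launder(&s)->key;  // 2
}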
@@ -429,13 +451,6 @@ struct map_slot_policy {
std::move(src->value));
}
}
template <class Allocator>
static void move(Allocator* alloc, slot_type* first, slot_type* last,
slot_type* result) {
for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
move(alloc, src, dest);
}
};
} // namespace container_internal


@@ -337,11 +337,11 @@ ABSL_NAMESPACE_END
} // namespace absl
enum Hash : size_t {
kStd = 0x2, // std::hash
kStd = 0x1, // std::hash
#ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER
kExtension = 0x4, // ::hash (GCC extension)
kExtension = 0x2, // ::hash (GCC extension)
#endif // _MSC_VER
};


@@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
} // namespace
std::mt19937_64* GetSharedRng() {
RandomDeviceSeedSeq seed_seq;
static auto* rng = new std::mt19937_64(seed_seq);
static auto* rng = [] {
RandomDeviceSeedSeq seed_seq;
return new std::mt19937_64(seed_seq);
}();
return rng;
}
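The change above moves the seed sequence into an immediately-invoked lambda so
it is constructed exactly once, as part of the static initialization, instead
of on every call. The general shape of the pattern, as a standalone sketch:

#include <random>

std::mt19937_64* GetRng() {
  static auto* rng = [] {
    std::random_device seed_source;  // runs only on the first call
    std::seed_seq seq{seed_source(), seed_source(), seed_source()};
    return new std::mt19937_64(seq);
  }();
  return rng;
}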


@@ -17,6 +17,7 @@
#include <cstddef>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
@@ -29,15 +30,34 @@ namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
private:
struct ReturnKey {
// We return `Key` here.
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Key,
absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int) {
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
}
#endif
template <class Key>
static Key Impl(Key&& k, char) {
return std::forward<Key>(k);
}
// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template <class Key, class... Args>
Key operator()(Key&& k, const Args&...) const {
return std::forward<Key>(k);
auto operator()(Key&& k, const Args&...) const
-> decltype(Impl(std::forward<Key>(k), 0)) {
return Impl(std::forward<Key>(k), 0);
}
};
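ReturnKey::Impl above uses the classic int/char ranking trick: the call site
passes the literal 0, which prefers the int overload when it is viable and
silently falls back to the char overload otherwise. A self-contained sketch of
the idiom (hypothetical names, not from this diff):

#include <iostream>

// Preferred when T::value is well-formed; 0 matches int exactly.
template <class T>
auto Describe(const T&, int) -> decltype(T::value, void()) {
  std::cout << "has ::value\n";
}

// Fallback: 0 -> char needs a conversion, so this ranks lower.
template <class T>
void Describe(const T&, char) {
  std::cout << "fallback\n";
}

struct WithValue { static constexpr int value = 1; };
struct Plain {};

int main() {
  Describe(WithValue{}, 0);  // prints "has ::value"
  Describe(Plain{}, 0);      // prints "fallback"
}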
@@ -52,9 +72,6 @@ struct hash_policy_traits {
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
@@ -156,7 +173,7 @@ struct hash_policy_traits {
// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
static auto key(slot_type* slot)
static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}


@@ -67,6 +67,7 @@ void HashtablezInfo::PrepareForSampling() {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);


@@ -73,6 +73,7 @@ struct HashtablezInfo {
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
@@ -105,6 +106,11 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -113,7 +119,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
RecordRehashSlow(info, 0);
info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
}
}
@@ -122,12 +129,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
info->num_erases.fetch_add(1, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_erases.store(
1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}
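The pattern in both comments above, isolated: when a counter has exactly one
writer, a relaxed load followed by a relaxed store replaces the more expensive
read-modify-write of fetch_add. A sketch:

#include <atomic>
#include <cstddef>

std::atomic<std::size_t> counter{0};

// Safe only because a single thread ever writes `counter`; concurrent
// readers may still load it with memory_order_relaxed.
void BumpFromTheOnlyWriter() {
  counter.store(counter.load(std::memory_order_relaxed) + 1,
                std::memory_order_relaxed);
}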
HashtablezInfo* SampleSlow(int64_t* next_sample);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -179,14 +195,27 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
#else
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
// be removed by the linker, in order to reduce the binary size.
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t) {}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
inline void RecordErase() {}
friend inline void swap(HashtablezInfoHandle& /*lhs*/,
HashtablezInfoHandle& /*rhs*/) {}
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
#endif // ABSL_PER_THREAD_TLS
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistation
// with the global sampler.


@@ -38,6 +38,7 @@ constexpr int kProbeLength = 8;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle& h) {
@@ -46,6 +47,13 @@ class HashtablezInfoHandlePeer {
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
#else
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle&) { return false; }
static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
namespace {
using ::absl::synchronization_internal::ThreadPool;
@@ -76,6 +84,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -95,6 +104,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -167,9 +177,10 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
}
#if defined(ABSL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
@@ -213,7 +224,6 @@ TEST(HashtablezSamplerTest, Sample) {
}
EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
#endif
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = HashtablezSampler::Global();
@@ -243,6 +253,8 @@ TEST(HashtablezSamplerTest, Handle) {
});
EXPECT_FALSE(found);
}
#endif
TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler;


@@ -462,6 +462,9 @@ class Storage {
Inlined inlined;
};
template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
Metadata metadata_;
Data data_;
};
@@ -542,48 +545,42 @@ template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
StorageView storage_view = MakeStorageView();
IteratorValueAdapter<MoveIterator> move_values(
MoveIterator(storage_view.data));
AllocationTransaction allocation_tx(GetAllocPtr());
ConstructionTransaction construction_tx(GetAllocPtr());
absl::Span<value_type> construct_loop;
absl::Span<value_type> move_construct_loop;
absl::Span<value_type> destroy_loop;
if (new_size > storage_view.capacity) {
auto* const base = storage_view.data;
const size_type size = storage_view.size;
auto* alloc = GetAllocPtr();
if (new_size <= size) {
// Destroy extra old elements.
inlined_vector_internal::DestroyElements(alloc, base + new_size,
size - new_size);
} else if (new_size <= storage_view.capacity) {
// Construct new elements in place.
inlined_vector_internal::ConstructElements(alloc, base + size, &values,
new_size - size);
} else {
// Steps:
// a. Allocate new backing store.
// b. Construct new elements in new backing store.
// c. Move existing elements from old backing store to now.
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
AllocationTransaction allocation_tx(alloc);
size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
pointer new_data = allocation_tx.Allocate(new_capacity);
construct_loop = {new_data + storage_view.size,
new_size - storage_view.size};
move_construct_loop = {new_data, storage_view.size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
construct_loop = {storage_view.data + storage_view.size,
new_size - storage_view.size};
} else {
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
construction_tx.Construct(construct_loop.data(), &values,
construct_loop.size());
ConstructionTransaction construction_tx(alloc);
construction_tx.Construct(new_data + size, &values, new_size - size);
inlined_vector_internal::ConstructElements(
GetAllocPtr(), move_construct_loop.data(), &move_values,
move_construct_loop.size());
IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
size);
inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
destroy_loop.size());
construction_tx.Commit();
if (allocation_tx.DidAllocate()) {
inlined_vector_internal::DestroyElements(alloc, base, size);
construction_tx.Commit();
DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx);
SetIsAllocated();
}
SetSize(new_size);
}
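The rewrite above splits Resize into the three regimes called out in its
comments. A usage sketch that hits each one, assuming an inline capacity of 4:

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v = {1, 2, 3};
  v.resize(2);    // new_size <= size: destroy the extra element
  v.resize(4);    // size < new_size <= capacity: construct in place
  v.resize(100);  // new_size > capacity: allocate, construct, move, destroy
  return v.size() == 100 ? 0 : 1;
}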
@@ -684,44 +681,50 @@ template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
StorageView storage_view = MakeStorageView();
const auto n = storage_view.size;
if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
// Fast path; new element fits.
pointer last_ptr = storage_view.data + n;
AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
std::forward<Args>(args)...);
AddSize(1);
return *last_ptr;
}
// TODO(b/173712035): Annotate with musttail attribute to prevent regression.
return EmplaceBackSlow(std::forward<Args>(args)...);
}
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
StorageView storage_view = MakeStorageView();
AllocationTransaction allocation_tx(GetAllocPtr());
IteratorValueAdapter<MoveIterator> move_values(
MoveIterator(storage_view.data));
pointer construct_data;
if (storage_view.size == storage_view.capacity) {
size_type new_capacity = NextCapacity(storage_view.capacity);
construct_data = allocation_tx.Allocate(new_capacity);
} else {
construct_data = storage_view.data;
}
size_type new_capacity = NextCapacity(storage_view.capacity);
pointer construct_data = allocation_tx.Allocate(new_capacity);
pointer last_ptr = construct_data + storage_view.size;
// Construct new element.
AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
std::forward<Args>(args)...);
if (allocation_tx.DidAllocate()) {
ABSL_INTERNAL_TRY {
inlined_vector_internal::ConstructElements(
GetAllocPtr(), allocation_tx.GetData(), &move_values,
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx);
SetIsAllocated();
// Move elements from old backing store to new backing store.
ABSL_INTERNAL_TRY {
inlined_vector_internal::ConstructElements(
GetAllocPtr(), allocation_tx.GetData(), &move_values,
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
AcquireAllocatedData(&allocation_tx);
SetIsAllocated();
AddSize(1);
return *last_ptr;
}
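EmplaceBack above is now a thin, inlinable fast path, with the reallocating
case split into the ABSL_ATTRIBUTE_NOINLINE EmplaceBackSlow. The shape of that
optimization in a self-contained sketch (using the GCC/Clang noinline
attribute directly; Buf is a hypothetical type, not from this diff):

#include <cstddef>
#include <cstring>

struct Buf {
  int* data = nullptr;
  std::size_t size = 0, capacity = 0;

  int& push_back(int x) {
    if (size != capacity) {    // hot path: small enough to inline
      data[size] = x;
      return data[size++];
    }
    return push_back_slow(x);  // cold path kept out of line
  }

  // Plays the role of EmplaceBackSlow: grow, move, then append.
  __attribute__((noinline)) int& push_back_slow(int x) {
    const std::size_t new_cap = capacity == 0 ? 4 : capacity * 2;
    int* new_data = new int[new_cap];
    if (data != nullptr) {
      std::memcpy(new_data, data, size * sizeof(int));
      delete[] data;
    }
    data = new_data;
    capacity = new_cap;
    data[size] = x;
    return data[size++];
  }
};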


@@ -163,6 +163,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <string>
#include <tuple>
@@ -170,15 +171,16 @@
#include <typeinfo>
#include <utility>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
@@ -614,7 +616,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
#ifdef ADDRESS_SANITIZER
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {


@@ -17,6 +17,7 @@
// We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t
#include <stddef.h>
#include <cstdint>
#include <memory>
#include <sstream>
@@ -24,6 +25,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h"
@@ -126,8 +128,10 @@ TEST(Layout, ElementTypes) {
{
using L = Layout<int32_t, int32_t>;
SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>,
decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>,
decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
@@ -366,18 +370,21 @@ TEST(Layout, PointerByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(
12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
}
@@ -385,39 +392,44 @@ TEST(Layout, PointerByIndex) {
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
@@ -426,7 +438,8 @@ TEST(Layout, PointerByIndex) {
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -437,75 +450,78 @@ TEST(Layout, PointerByType) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const Int128*>(
L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const Int128*>(
L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(
L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
@@ -546,15 +562,18 @@ TEST(Layout, MutablePointerByIndex) {
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -566,48 +585,61 @@ TEST(Layout, MutablePointerByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(
24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@@ -788,67 +820,72 @@ TEST(Layout, SliceByIndexData) {
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
Distance(
p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p,
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(12,
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
Distance(
p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
@@ -862,7 +899,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
p,
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
@@ -876,7 +914,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(
@@ -888,12 +927,14 @@ TEST(Layout, SliceByIndexData) {
p,
Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
8,
Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
@@ -904,98 +945,94 @@ TEST(Layout, SliceByTypeData) {
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
p,
Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
p,
Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
p,
Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
L::Partial(1, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
L::Partial(5, 3).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(0, 0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
L::Partial(0, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(1, 0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
L::Partial(1, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
L::Partial(1, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(5, 3, 1).Slice<int8_t>(p))
.data()));
EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
L::Partial(5, 3, 1).Slice<Int128>(p))
.data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
L::Partial(5, 3, 1).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
Distance(p,
Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p,
Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
8,
Distance(
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
@@ -1003,18 +1040,19 @@ TEST(Layout, MutableSliceByIndexData) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@@ -1023,55 +1061,63 @@ TEST(Layout, MutableSliceByIndexData) {
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
4, Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
0, Distance(
p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24, Distance(
p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
8, Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
@@ -1080,66 +1126,84 @@ TEST(Layout, MutableSliceByTypeData) {
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
0, Distance(
p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4, Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
Distance(p,
Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
p,
Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(
@@ -1148,14 +1212,16 @@ TEST(Layout, MutableSliceByTypeData) {
EXPECT_EQ(
8,
Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
p,
Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
@@ -1254,17 +1320,17 @@ TEST(Layout, MutableSlices) {
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_THAT(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
EXPECT_THAT(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
@@ -1314,7 +1380,7 @@ struct Region {
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
#ifdef ADDRESS_SANITIZER
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
@@ -1396,7 +1462,8 @@ TEST(Layout, DebugString) {
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
@@ -1404,7 +1471,8 @@ TEST(Layout, DebugString) {
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
@@ -27,7 +27,7 @@ constexpr size_t Group::kWidth;
// Returns "random" seed.
inline size_t RandomSeed() {
#if ABSL_HAVE_THREAD_LOCAL
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
@@ -43,6 +43,19 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
void ConvertDeletedToEmptyAndFullToDeleted(
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
ctrl[capacity] = kSentinel;
}
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
@@ -122,6 +122,16 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
std::true_type /* propagate_on_container_swap */) {
using std::swap;
swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
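
A minimal sketch of the intended call shape (the wrapper name here is hypothetical; the real call site is the swap() change further down in this diff). The container forwards the allocator traits' propagation tag, so overload resolution picks the real swap or the no-op at compile time:

// Hypothetical caller; requires <memory> for std::allocator_traits.
template <class AllocType>
void SwapAllocsOf(AllocType& lhs, AllocType& rhs) {
  using Traits = std::allocator_traits<AllocType>;
  // true_type selects the swapping overload above; false_type the no-op.
  SwapAlloc(lhs, rhs, typename Traits::propagate_on_container_swap{});
}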
template <size_t Width>
class probe_seq {
public:
@@ -169,10 +179,14 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable() {
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
return false;
}
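
Illustrative checks, not part of the patch: the defaulted std::true_type tag preserves the old behavior, while an explicit std::false_type tag short-circuits to false without instantiating std::swap for the type at all:

static_assert(IsNoThrowSwappable<int>(), "int swaps without throwing");
static_assert(!IsNoThrowSwappable<int>(std::false_type{}),
              "the false_type tag forces false, bypassing std::swap");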
template <typename T>
int TrailingZeros(T x) {
@@ -458,17 +472,7 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
inline void ConvertDeletedToEmptyAndFullToDeleted(
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
ctrl[capacity] = kSentinel;
}
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
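
A schematic before/after of that transform on one table (byte values illustrative only): full slots become tombstones, and both tombstones and empties become empty, so a rehash-in-place pass can then reinsert every element.

// Before: ctrl = [ h(a), kDeleted, kEmpty, h(b), kSentinel, <cloned bytes> ]
// After:  ctrl = [ kDeleted, kEmpty, kEmpty, kDeleted, kSentinel, <cloned bytes> ]
// The clones past the sentinel are refreshed by the final memcpy in the
// out-of-line definition earlier in this diff.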
// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
@@ -497,6 +501,76 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
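
A quick arithmetic check of this formula against the 7/8 maximum load factor these tables use (integer division throughout):

// growth = 14:  14 + (14 - 1) / 7  ==  15
// NormalizeCapacity(15) == 15 (already 2^4 - 1), and a capacity-15 table
// holds exactly 14 elements at 7/8 load: 15 - 15 / 8 == 14.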
}
inline void AssertIsFull(ctrl_t* ctrl) {
ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
"Invalid operation on iterator. The element might have "
"been erased, or the table might have rehashed.");
}
inline void AssertIsValid(ctrl_t* ctrl) {
ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
"Invalid operation on iterator. The element might have "
"been erased, or the table might have rehashed.");
}
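
The two contracts differ only on null: an end() iterator carries a null ctrl_, which AssertIsValid accepts (comparison is legal) and AssertIsFull rejects (dereference, increment, and erase are not). A hedged sketch, assuming a hardened build and the IntTable helper from the tests below:

// IntTable t;
// (void)(t.end() == t.end());  // fine: AssertIsValid tolerates null ctrl_
// t.erase(t.end());            // ABSL_HARDENING_ASSERT fires with
//                              // "Invalid operation on iterator..."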
struct FindInfo {
size_t offset;
size_t probe_length;
};
// The representation of the object has two modes:
// - small: For capacities < kWidth-1
// - large: For the rest.
//
// Differences:
// - In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy kEmpty values that do not
// represent a real slot. This is important to take into account on
// find_first_non_full(), where we never try ShouldInsertBackwards() for
// small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
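
For a concrete feel, assuming SSE2 groups (Group::kWidth == 16; the width depends on the target ISA):

// is_small(1)  == true   // whole capacity usable; clones stop iteration
// is_small(14) == true   // kWidth - 2: still small
// is_small(15) == false  // kWidth - 1: first capacity probed normally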
inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
// Probes the raw_hash_set with the probe sequence for hash and returns the
// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both kEmpty and kDeleted
// in one group. Such tables appear during drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
Group g{ctrl + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
assert(seq.index() < capacity && "full table!");
}
}
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -511,7 +585,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
// Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
@@ -617,7 +692,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
assert_is_full();
AssertIsFull(ctrl_);
return PolicyTraits::element(slot_);
}
@@ -626,7 +701,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
iterator& operator++() {
assert_is_full();
AssertIsFull(ctrl_);
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -640,8 +715,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
a.assert_is_valid();
b.assert_is_valid();
AssertIsValid(a.ctrl_);
AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -655,13 +730,6 @@ class raw_hash_set {
ABSL_INTERNAL_ASSUME(ctrl != nullptr);
}
void assert_is_full() const {
ABSL_HARDENING_ASSERT(ctrl_ != nullptr && IsFull(*ctrl_));
}
void assert_is_valid() const {
ABSL_HARDENING_ASSERT(ctrl_ == nullptr || IsFull(*ctrl_));
}
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -730,7 +798,6 @@ class raw_hash_set {
: ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
reset_growth_left();
initialize_slots();
}
}
@@ -836,7 +903,7 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
auto target = find_first_non_full(hash);
auto target = find_first_non_full(ctrl_, hash, capacity_);
set_ctrl(target.offset, H2(hash));
emplace_at(target.offset, v);
infoz_.RecordInsert(hash, target.probe_length);
@@ -1045,7 +1112,9 @@ class raw_hash_set {
}
iterator insert(const_iterator, node_type&& node) {
return insert(std::move(node)).first;
auto res = insert(std::move(node));
node = std::move(res.node);
return res.position;
}
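
This matches the node-handle protocol of std::unordered_set::insert(const_iterator, node_type&&): on a failed insertion the node is handed back to the caller instead of being destroyed. Condensed from the HintInsert test added near the end of this diff:

// auto node = t.extract(2);
// t.insert(2);                                    // key occupied again
// auto it = t.insert(t.begin(), std::move(node)); // insertion fails
// // *it == 2 and node is still non-empty: the caller keeps the value.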
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1174,7 +1243,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
it.assert_is_full();
AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1208,7 +1277,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
position.inner_.assert_is_full();
AssertIsFull(position.inner_.ctrl_);
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1225,8 +1294,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
(!AllocTraits::propagate_on_container_swap::value ||
IsNoThrowSwappable<allocator_type>())) {
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
@@ -1236,12 +1305,8 @@ class raw_hash_set {
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
swap(infoz_, that.infoz_);
if (AllocTraits::propagate_on_container_swap::value) {
swap(alloc_ref(), that.alloc_ref());
} else {
// If the allocators do not compare equal it is officially undefined
// behavior. We choose to do nothing.
}
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
@@ -1260,7 +1325,12 @@ class raw_hash_set {
}
}
void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
void reserve(size_t n) {
size_t m = GrowthToLowerboundCapacity(n);
if (m > capacity_) {
resize(NormalizeCapacity(m));
}
}
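
The practical effect (not stated explicitly in the patch): reserve no longer forwards to rehash, so a request that already fits in the current capacity is a no-op rather than a possible shrink-and-rehash, and existing iterators survive it. Sketch with the IntTable helper from the tests:

// IntTable t;
// t.reserve(14);   // m == 15 > capacity_: grows to capacity 15
// auto it = t.insert(1).first;
// t.reserve(1);    // m <= capacity_: nothing happens, `it` stays valid
// t.rehash(0);     // shrinking remains available, but only via rehash()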
// Extension API: support for heterogeneous keys.
//
@@ -1285,7 +1355,7 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
#if defined(__GNUC__)
auto seq = probe(hash_ref()(key));
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
@@ -1300,7 +1370,7 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
auto seq = probe(hash);
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1311,6 +1381,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
assert(seq.index() < capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1521,7 +1592,7 @@ class raw_hash_set {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
auto target = find_first_non_full(hash);
auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
set_ctrl(new_i, H2(hash));
@@ -1540,7 +1611,7 @@ class raw_hash_set {
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
assert(!is_small());
assert(!is_small(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1565,7 +1636,7 @@ class raw_hash_set {
if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i));
auto target = find_first_non_full(hash);
auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
@@ -1573,7 +1644,8 @@ class raw_hash_set {
// If they do, we don't need to move the object, as it already falls in the
// best probe position we can achieve.
const auto probe_index = [&](size_t pos) {
return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
Group::kWidth;
};
// Element doesn't move.
@@ -1617,7 +1689,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
auto seq = probe(hash);
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1632,41 +1704,6 @@ class raw_hash_set {
return false;
}
// Probes the raw_hash_set with the probe sequence for hash and returns the
// pointer to the first empty or deleted slot.
// NOTE: this function must work with tables having both kEmpty and kDelete
// in one group. Such tables appears during drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
struct FindInfo {
size_t offset;
size_t probe_length;
};
FindInfo find_first_non_full(size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
assert(seq.index() < capacity_ && "full table!");
seq.next();
}
}
// TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that));
@ -1683,7 +1720,7 @@ class raw_hash_set {
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
auto hash = hash_ref()(key);
auto seq = probe(hash);
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1694,16 +1731,17 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
assert(seq.index() < capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(hash);
auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
target = find_first_non_full(hash);
target = find_first_non_full(ctrl_, hash, capacity_);
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
@@ -1736,10 +1774,6 @@ class raw_hash_set {
private:
friend struct RawHashSetTestOnlyAccess;
probe_seq<Group::kWidth> probe(size_t hash) const {
return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
}
// Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl() {
std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
@@ -1769,22 +1803,6 @@ class raw_hash_set {
size_t& growth_left() { return settings_.template get<0>(); }
// The representation of the object has two modes:
// - small: For capacities < kWidth-1
// - large: For the rest.
//
// Differences:
// - In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// - In small mode only the first `capacity()` control bytes after the
// sentinel are valid. The rest contain dummy kEmpty values that do not
// represent a real slot. This is important to take into account on
// find_first_non_full(), where we never try ShouldInsertBackwards() for
// small tables.
bool is_small() const { return capacity_ < Group::kWidth - 1; }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
@@ -1828,7 +1846,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
auto seq = set.probe(hash);
auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
@@ -424,6 +424,81 @@ TEST_F(PropagateOnAll, Swap) {
EXPECT_EQ(0, it->num_copies());
}
// This allocator is similar to std::pmr::polymorphic_allocator.
// Note the disabled assignment.
template <class T>
class PAlloc {
template <class>
friend class PAlloc;
public:
// types
using value_type = T;
// traits
using propagate_on_container_swap = std::false_type;
PAlloc() noexcept = default;
explicit PAlloc(size_t id) noexcept : id_(id) {}
PAlloc(const PAlloc&) noexcept = default;
PAlloc& operator=(const PAlloc&) noexcept = delete;
template <class U>
PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {} // NOLINT
template <class U>
struct rebind {
using other = PAlloc<U>;
};
constexpr PAlloc select_on_container_copy_construction() const { return {}; }
// public member functions
T* allocate(size_t) { return new T; }
void deallocate(T* p, size_t) { delete p; }
friend bool operator==(const PAlloc& a, const PAlloc& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
private:
size_t id_ = std::numeric_limits<size_t>::max();
};
// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
__GNUC_MINOR__ != 5)
TEST(NoPropagateOn, Swap) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
swap(t1, t2);
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA(2));
}
#endif
TEST(NoPropagateOn, CopyConstruct) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(t1);
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA());
}
TEST(NoPropagateOn, Assignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
t1 = t2;
EXPECT_EQ(t1.get_allocator(), PA(1));
EXPECT_EQ(t2.get_allocator(), PA(2));
}
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
@@ -26,6 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
@@ -846,7 +847,8 @@ TEST(Table, EraseMaintainsValidIterator) {
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
static constexpr int kGroupSize = Group::kWidth - 1;
auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
auto topk_range = [](size_t b, size_t e,
IntTable* t) -> std::vector<int64_t> {
for (size_t i = b; i != e; ++i) {
t->emplace(i);
}
@@ -1000,8 +1002,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
// 1. Create new table and reserve it to keys.size() * 2
// 2. Insert all keys xored with seed
// 3. Collect ProbeStats from final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
size_t num_iters) {
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
const std::vector<int64_t>& keys, size_t num_iters) {
const size_t reserve_size = keys.size() * 2;
ProbeStats stats;
@@ -1709,6 +1711,26 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
TEST(Nodes, HintInsert) {
IntTable t = {1, 2, 3};
auto node = t.extract(1);
EXPECT_THAT(t, UnorderedElementsAre(2, 3));
auto it = t.insert(t.begin(), std::move(node));
EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
EXPECT_EQ(*it, 1);
EXPECT_FALSE(node);
node = t.extract(2);
EXPECT_THAT(t, UnorderedElementsAre(1, 3));
// reinsert 2 to make the next insert fail.
t.insert(2);
EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
it = t.insert(t.begin(), std::move(node));
EXPECT_EQ(*it, 2);
// The node was not emptied by the insert call.
EXPECT_TRUE(node);
}
IntTable MakeSimpleTable(size_t size) {
IntTable t;
while (t.size() < size) t.insert(t.size());
@@ -1791,11 +1813,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
constexpr char kDeathMsg[] = "IsFull";
constexpr char kDeathMsg[] = "Invalid operation on iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
#if defined(ABSL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
@@ -1816,7 +1838,7 @@ TEST(RawHashSamplerTest, Sample) {
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
}
#endif // ABSL_HASHTABLEZ_SAMPLER
#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off.
@@ -1839,7 +1861,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
0.00, 0.001);
}
#ifdef ADDRESS_SANITIZER
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
t.reserve(5);
@@ -1863,7 +1885,7 @@ TEST(Sanitizer, PoisoningOnErase) {
t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif // ADDRESS_SANITIZER
#endif // ABSL_HAVE_ADDRESS_SANITIZER
} // namespace
} // namespace container_internal