merge(3p/immer): Subtree merge at 'ad3e3556d' as 'third_party/immer'

Change-Id: I9636a41ad44b4218293833fd3e9456d9b07c731b
This commit is contained in:
Vincent Ambo 2020-07-15 08:20:18 +01:00
commit 1213b086a1
311 changed files with 74223 additions and 0 deletions

212
third_party/immer/immer/algorithm.hpp vendored Normal file
View file

@ -0,0 +1,212 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <algorithm>
#include <numeric>
#include <type_traits>
namespace immer {
/**
* @defgroup algorithm
* @{
*/
/*@{*/
// Right now these algorithms dispatch directly to the vector
// implementations unconditionally. This will be changed in the
// future to support other kinds of containers.
/*!
* Apply operation `fn` for every contiguous *chunk* of data in the
* range sequentially. Each time, `Fn` is passed two `value_type`
* pointers describing a range over a part of the vector. This allows
* iterating over the elements in the most efficient way.
*
* @rst
*
* .. tip:: This is a low level method. Most of the time, :doc:`other
* wrapper algorithms <algorithms>` should be used instead.
*
* @endrst
*/
// Range overload: delegates to the container implementation's
// chunk-wise traversal (`r.impl()` is the internal representation of
// an immer container).
template <typename Range, typename Fn>
void for_each_chunk(const Range& r, Fn&& fn)
{
    r.impl().for_each_chunk(std::forward<Fn>(fn));
}
// Iterator-pair overload: both iterators must refer to the same
// underlying container (checked by the assert); traversal is limited
// to the [first.index(), last.index()) sub-range.
template <typename Iterator, typename Fn>
void for_each_chunk(const Iterator& first, const Iterator& last, Fn&& fn)
{
    assert(&first.impl() == &last.impl());
    first.impl().for_each_chunk(
        first.index(), last.index(), std::forward<Fn>(fn));
}
// Raw-pointer overload: a contiguous range is itself a single chunk,
// so the callback is invoked exactly once over the whole range.
template <typename ValueT, typename Callback>
void for_each_chunk(const ValueT* chunk_begin,
                    const ValueT* chunk_end,
                    Callback&& fn)
{
    std::forward<Callback>(fn)(chunk_begin, chunk_end);
}
/*!
* Apply operation `fn` for every contiguous *chunk* of data in the
* range sequentially, until `fn` returns `false`. Each time, `Fn` is
* passed two `value_type` pointers describing a range over a part of
* the vector. This allows iterating over the elements in the most
* efficient way.
*
* @rst
*
* .. tip:: This is a low level method. Most of the time, :doc:`other
* wrapper algorithms <algorithms>` should be used instead.
*
* @endrst
*/
// Range overload: delegates to the container implementation's
// short-circuiting chunk traversal; returns `false` as soon as `fn`
// returns `false` for some chunk.
template <typename Range, typename Fn>
bool for_each_chunk_p(const Range& r, Fn&& fn)
{
    return r.impl().for_each_chunk_p(std::forward<Fn>(fn));
}
// Iterator-pair overload: both iterators must refer to the same
// underlying container (checked by the assert); traversal is limited
// to the [first.index(), last.index()) sub-range and stops early when
// `fn` returns `false`.
template <typename Iterator, typename Fn>
bool for_each_chunk_p(const Iterator& first, const Iterator& last, Fn&& fn)
{
    assert(&first.impl() == &last.impl());
    return first.impl().for_each_chunk_p(
        first.index(), last.index(), std::forward<Fn>(fn));
}
// Raw-pointer overload: contiguous memory forms a single chunk, so
// the predicate decides the overall result in one invocation.
template <typename ValueT, typename Callback>
bool for_each_chunk_p(const ValueT* chunk_begin,
                      const ValueT* chunk_end,
                      Callback&& fn)
{
    return std::forward<Callback>(fn)(chunk_begin, chunk_end);
}
/*!
* Equivalent of `std::accumulate` applied to the range `r`.
*/
// Folds every element of `r` into `init`, chunk by chunk.  The inner
// loop performs `init = init + *it`, which is exactly what C++17
// `std::accumulate` specifies.
template <typename Range, typename T>
T accumulate(Range&& r, T init)
{
    for_each_chunk(r, [&init](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            init = init + *chunk_first;
    });
    return init;
}
// Folds every element of `r` into `init` with the binary operation
// `fn`, chunk by chunk; equivalent to C++17 `std::accumulate` with an
// operation: `init = fn(init, *it)` for each element in order.
template <typename Range, typename T, typename Fn>
T accumulate(Range&& r, T init, Fn fn)
{
    for_each_chunk(r, [&init, &fn](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            init = fn(init, *chunk_first);
    });
    return init;
}
/*!
* Equivalent of `std::accumulate` applied to the range @f$ [first,
* last) @f$.
*/
// Iterator-pair version: folds [first, last) into `init`, chunk by
// chunk, using `init = init + *it` (C++17 `std::accumulate`
// semantics).
template <typename Iterator, typename T>
T accumulate(Iterator first, Iterator last, T init)
{
    for_each_chunk(first, last, [&init](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            init = init + *chunk_first;
    });
    return init;
}
// Iterator-pair version with a binary operation: folds [first, last)
// into `init` using `init = fn(init, *it)` for each element in order
// (C++17 `std::accumulate` semantics).
template <typename Iterator, typename T, typename Fn>
T accumulate(Iterator first, Iterator last, T init, Fn fn)
{
    for_each_chunk(first, last, [&init, &fn](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            init = fn(init, *chunk_first);
    });
    return init;
}
/*!
* Equivalent of `std::for_each` applied to the range `r`.
*/
// Applies `fn` to every element of `r` in order, chunk by chunk, and
// hands the function object back to the caller (like `std::for_each`).
template <typename Range, typename Fn>
Fn&& for_each(Range&& r, Fn&& fn)
{
    for_each_chunk(r, [&fn](auto chunk_first, auto chunk_last) {
        while (chunk_first != chunk_last) {
            fn(*chunk_first);
            ++chunk_first;
        }
    });
    return std::forward<Fn>(fn);
}
/*!
* Equivalent of `std::for_each` applied to the range @f$ [first,
* last) @f$.
*/
// Iterator-pair version: applies `fn` to every element of
// [first, last) in order, chunk by chunk, then returns the function
// object (like `std::for_each`).
template <typename Iterator, typename Fn>
Fn&& for_each(Iterator first, Iterator last, Fn&& fn)
{
    for_each_chunk(first, last, [&fn](auto chunk_first, auto chunk_last) {
        while (chunk_first != chunk_last) {
            fn(*chunk_first);
            ++chunk_first;
        }
    });
    return std::forward<Fn>(fn);
}
/*!
* Equivalent of `std::copy` applied to the range `r`.
*/
// Copies every element of `r` through `out`, chunk by chunk, and
// returns the advanced output iterator (like `std::copy`).
template <typename Range, typename OutIter>
OutIter copy(Range&& r, OutIter out)
{
    for_each_chunk(r, [&out](auto chunk_first, auto chunk_last) {
        while (chunk_first != chunk_last)
            *out++ = *chunk_first++;
    });
    return out;
}
/*!
* Equivalent of `std::copy` applied to the range @f$ [first,
* last) @f$.
*/
// Iterator-pair version: copies [first, last) through `out`, chunk by
// chunk, returning the advanced output iterator (like `std::copy`).
template <typename InIter, typename OutIter>
OutIter copy(InIter first, InIter last, OutIter out)
{
    for_each_chunk(first, last, [&out](auto chunk_first, auto chunk_last) {
        while (chunk_first != chunk_last)
            *out++ = *chunk_first++;
    });
    return out;
}
/*!
* Equivalent of `std::all_of` applied to the range `r`.
*/
// Returns whether `p` holds for every element of `r`; stops at the
// first failing element thanks to the short-circuiting chunk
// traversal (like `std::all_of`).
template <typename Range, typename Pred>
bool all_of(Range&& r, Pred p)
{
    return for_each_chunk_p(r, [&p](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            if (!p(*chunk_first))
                return false;
        return true;
    });
}
/*!
* Equivalent of `std::all_of` applied to the range @f$ [first, last)
* @f$.
*/
// Iterator-pair version: returns whether `p` holds for every element
// of [first, last), stopping at the first failure (like
// `std::all_of`).
template <typename Iter, typename Pred>
bool all_of(Iter first, Iter last, Pred p)
{
    return for_each_chunk_p(first, last, [&p](auto chunk_first, auto chunk_last) {
        for (; chunk_first != chunk_last; ++chunk_first)
            if (!p(*chunk_first))
                return false;
        return true;
    });
}
/** @} */ // group: algorithm
} // namespace immer

364
third_party/immer/immer/array.hpp vendored Normal file
View file

@ -0,0 +1,364 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/arrays/with_capacity.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
template <typename T, typename MemoryPolicy>
class array_transient;
/*!
* Immutable container that stores a sequence of elements in
* contiguous memory.
*
* @tparam T The type of the values to be stored in the container.
*
* @rst
*
* It supports the most efficient iteration and random access,
* equivalent to a ``std::vector`` or ``std::array``, but all
* manipulations are :math:`O(size)`.
*
* .. tip:: Don't be fooled by the bad complexity of this data
* structure. It is a great choice for short sequence or when it
* is seldom or never changed. This depends on the ``sizeof(T)``
* and the expensiveness of its ``T``'s copy constructor, in case
* of doubt, measure. For basic types, using an `array` when
* :math:`n < 100` is a good heuristic.
*
* @endrst
*/
template <typename T, typename MemoryPolicy = default_memory_policy>
class array
{
    // Internal representation: when the memory policy enables
    // transient r-values we keep a capacity field (so the r-value
    // overloads below can mutate in place), otherwise the node stores
    // exactly `size` elements.
    using impl_t =
        std::conditional_t<MemoryPolicy::use_transient_rvalues,
                           detail::arrays::with_capacity<T, MemoryPolicy>,
                           detail::arrays::no_capacity<T, MemoryPolicy>>;

    // Tag used to dispatch the r-value overloads (push_back, set,
    // update, take) to either a mutating or a copying implementation.
    using move_t =
        std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;

public:
    using value_type       = T;
    using reference        = const T&;
    using size_type        = std::size_t;
    using difference_type  = std::ptrdiff_t;
    using const_reference  = const T&;

    using iterator         = const T*;
    using const_iterator   = iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;

    using memory_policy  = MemoryPolicy;
    using transient_type = array_transient<T, MemoryPolicy>;

    /*!
     * Default constructor. It creates an array of `size() == 0`. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    array() = default;

    /*!
     * Constructs an array containing the elements in `values`.
     */
    array(std::initializer_list<T> values)
        : impl_{impl_t::from_initializer_list(values)}
    {}

    /*!
     * Constructs an array containing the elements in the range
     * defined by the forward iterator `first` and range sentinel `last`.
     */
    template <typename Iter,
              typename Sent,
              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent> &&
                                   detail::is_forward_iterator_v<Iter>,
                               bool> = true>
    array(Iter first, Sent last)
        : impl_{impl_t::from_range(first, last)}
    {}

    /*!
     * Constructs an array containing the element `v` repeated `n`
     * times.
     */
    array(size_type n, T v = {})
        : impl_{impl_t::from_fill(n, v)}
    {}

    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return impl_.data(); }

    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const { return impl_.data() + impl_.size; }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing at the first element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rbegin() const
    {
        return reverse_iterator{end()};
    }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing after the last element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rend() const
    {
        return reverse_iterator{begin()};
    }

    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD std::size_t size() const { return impl_.size; }

    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }

    /*!
     * Access the raw data.
     */
    IMMER_NODISCARD const T* data() const { return impl_.data(); }

    /*!
     * Access the last element.  Undefined for an empty array.
     */
    IMMER_NODISCARD const T& back() const { return data()[size() - 1]; }

    /*!
     * Access the first element.  Undefined for an empty array.
     */
    IMMER_NODISCARD const T& front() const { return data()[0]; }

    /*!
     * Returns a `const` reference to the element at position `index`.
     * It is undefined when @f$ index \geq size() @f$. It does not
     * allocate memory and its complexity is *effectively* @f$ O(1)
     * @f$.
     */
    IMMER_NODISCARD reference operator[](size_type index) const
    {
        return impl_.get(index);
    }

    /*!
     * Returns a `const` reference to the element at position
     * `index`. It throws an `std::out_of_range` exception when @f$
     * index \geq size() @f$. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    reference at(size_type index) const { return impl_.get_check(index); }

    /*!
     * Returns whether the vectors are equal.
     */
    IMMER_NODISCARD bool operator==(const array& other) const
    {
        return impl_.equals(other.impl_);
    }
    IMMER_NODISCARD bool operator!=(const array& other) const
    {
        return !(*this == other);
    }

    /*!
     * Returns an array with `value` inserted at the end. It may
     * allocate memory and its complexity is @f$ O(size) @f$.
     *
     * @rst
     *
     * **Example**
     *   .. literalinclude:: ../example/array/array.cpp
     *      :language: c++
     *      :dedent: 8
     *      :start-after: push-back/start
     *      :end-before:  push-back/end
     *
     * @endrst
     */
    IMMER_NODISCARD array push_back(value_type value) const&
    {
        return impl_.push_back(std::move(value));
    }

    // R-value overload: may mutate in place when the memory policy
    // supports transient r-values (dispatched via move_t).
    IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
    {
        return push_back_move(move_t{}, std::move(value));
    }

    /*!
     * Returns an array containing value `value` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is @f$ O(size) @f$.
     *
     * @rst
     *
     * **Example**
     *   .. literalinclude:: ../example/array/array.cpp
     *      :language: c++
     *      :dedent: 8
     *      :start-after: set/start
     *      :end-before:  set/end
     *
     * @endrst
     */
    IMMER_NODISCARD array set(std::size_t index, value_type value) const&
    {
        return impl_.assoc(index, std::move(value));
    }

    // R-value overload: may mutate in place (see push_back above).
    IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
    {
        return set_move(move_t{}, index, std::move(value));
    }

    /*!
     * Returns an array containing the result of the expression
     * `fn((*this)[idx])` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is @f$ O(size) @f$.
     *
     * @rst
     *
     * **Example**
     *   .. literalinclude:: ../example/array/array.cpp
     *      :language: c++
     *      :dedent: 8
     *      :start-after: update/start
     *      :end-before:  update/end
     *
     * @endrst
     */
    template <typename FnT>
    IMMER_NODISCARD array update(std::size_t index, FnT&& fn) const&
    {
        return impl_.update(index, std::forward<FnT>(fn));
    }

    // R-value overload: may mutate in place (see push_back above).
    template <typename FnT>
    IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
    {
        return update_move(move_t{}, index, std::forward<FnT>(fn));
    }

    /*!
     * Returns an array containing only the first `min(elems, size())`
     * elements. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     *
     * @rst
     *
     * **Example**
     *   .. literalinclude:: ../example/array/array.cpp
     *      :language: c++
     *      :dedent: 8
     *      :start-after: take/start
     *      :end-before:  take/end
     *
     * @endrst
     */
    IMMER_NODISCARD array take(size_type elems) const&
    {
        return impl_.take(elems);
    }

    // R-value overload: may mutate in place (see push_back above).
    IMMER_NODISCARD decltype(auto) take(size_type elems) &&
    {
        return take_move(move_t{}, elems);
    }

    /*!
     * Returns an @a transient form of this container, an
     * `immer::array_transient`.
     */
    IMMER_NODISCARD transient_type transient() const&
    {
        return transient_type{impl_};
    }
    IMMER_NODISCARD transient_type transient() &&
    {
        return transient_type{std::move(impl_)};
    }

    // Semi-private: exposes the internal representation (used by the
    // free algorithms in algorithm.hpp).
    const impl_t& impl() const { return impl_; }

private:
    friend transient_type;

    // Adopts an existing implementation node (used by the transient
    // when converting back to a persistent array).
    array(impl_t impl)
        : impl_(std::move(impl))
    {}

    // move_t dispatch targets.  The `std::true_type` variants mutate
    // this (uniquely owned) r-value in place and return it by r-value
    // reference; the `std::false_type` variants fall back to the
    // persistent (copying) operations.  `{}` passes a
    // default-constructed transience edit token to the `_mut` calls.
    array&& push_back_move(std::true_type, value_type value)
    {
        impl_.push_back_mut({}, std::move(value));
        return std::move(*this);
    }
    array push_back_move(std::false_type, value_type value)
    {
        return impl_.push_back(std::move(value));
    }

    array&& set_move(std::true_type, size_type index, value_type value)
    {
        impl_.assoc_mut({}, index, std::move(value));
        return std::move(*this);
    }
    array set_move(std::false_type, size_type index, value_type value)
    {
        return impl_.assoc(index, std::move(value));
    }

    template <typename Fn>
    array&& update_move(std::true_type, size_type index, Fn&& fn)
    {
        impl_.update_mut({}, index, std::forward<Fn>(fn));
        return std::move(*this);
    }
    template <typename Fn>
    array update_move(std::false_type, size_type index, Fn&& fn)
    {
        return impl_.update(index, std::forward<Fn>(fn));
    }

    array&& take_move(std::true_type, size_type elems)
    {
        impl_.take_mut({}, elems);
        return std::move(*this);
    }
    array take_move(std::false_type, size_type elems)
    {
        return impl_.take(elems);
    }

    impl_t impl_ = impl_t::empty();
};
} /* namespace immer */

View file

@ -0,0 +1,202 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/arrays/with_capacity.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
template <typename T, typename MemoryPolicy>
class array;
/*!
* Mutable version of `immer::array`.
*
* @rst
*
* Refer to :doc:`transients` to learn more about when and how to use
* the mutable versions of immutable containers.
*
* @endrst
*/
template <typename T, typename MemoryPolicy = default_memory_policy>
class array_transient : MemoryPolicy::transience_t::owner
{
    // Transients always use the capacity-carrying representation so
    // repeated push_back calls can amortize growth.
    using impl_t             = detail::arrays::with_capacity<T, MemoryPolicy>;
    using impl_no_capacity_t = detail::arrays::no_capacity<T, MemoryPolicy>;
    // Base class providing the transience "edit token" (*this is
    // passed to the `_mut` operations below as the owner).
    using owner_t            = typename MemoryPolicy::transience_t::owner;

public:
    using value_type       = T;
    using reference        = const T&;
    using size_type        = std::size_t;
    using difference_type  = std::ptrdiff_t;
    using const_reference  = const T&;

    using iterator         = const T*;
    using const_iterator   = iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;

    using memory_policy   = MemoryPolicy;
    using persistent_type = array<T, MemoryPolicy>;

    /*!
     * Default constructor. It creates a mutable array of `size() ==
     * 0`. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    array_transient() = default;

    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return impl_.data(); }

    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const { return impl_.data() + impl_.size; }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing at the first element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rbegin() const
    {
        return reverse_iterator{end()};
    }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing after the last element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rend() const
    {
        return reverse_iterator{begin()};
    }

    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD std::size_t size() const { return impl_.size; }

    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }

    /*!
     * Access the raw data.
     */
    IMMER_NODISCARD const T* data() const { return impl_.data(); }

    /*!
     * Provide mutable access to the raw underlying data.  May
     * copy-on-write if the data is shared with a persistent array.
     */
    IMMER_NODISCARD T* data_mut() { return impl_.data_mut(*this); }

    /*!
     * Access the last element.  Undefined for an empty array.
     */
    IMMER_NODISCARD const T& back() const { return data()[size() - 1]; }

    /*!
     * Access the first element.  Undefined for an empty array.
     */
    IMMER_NODISCARD const T& front() const { return data()[0]; }

    /*!
     * Returns a `const` reference to the element at position `index`.
     * It is undefined when @f$ index \geq size() @f$. It does not
     * allocate memory and its complexity is *effectively* @f$ O(1)
     * @f$.
     */
    reference operator[](size_type index) const { return impl_.get(index); }

    /*!
     * Returns a `const` reference to the element at position
     * `index`. It throws an `std::out_of_range` exception when @f$
     * index \geq size() @f$. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    reference at(size_type index) const { return impl_.get_check(index); }

    /*!
     * Inserts `value` at the end. It may allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    void push_back(value_type value)
    {
        impl_.push_back_mut(*this, std::move(value));
    }

    /*!
     * Sets to the value `value` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void set(size_type index, value_type value)
    {
        impl_.assoc_mut(*this, index, std::move(value));
    }

    /*!
     * Updates the array to contain the result of the expression
     * `fn((*this)[idx])` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    template <typename FnT>
    void update(size_type index, FnT&& fn)
    {
        impl_.update_mut(*this, index, std::forward<FnT>(fn));
    }

    /*!
     * Resizes the array to only contain the first `min(elems, size())`
     * elements. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void take(size_type elems) { impl_.take_mut(*this, elems); }

    /*!
     * Returns an @a immutable form of this container, an
     * `immer::array`.
     */
    IMMER_NODISCARD persistent_type persistent() &
    {
        // Renounce ownership of the current nodes by installing a
        // fresh owner token, so the returned persistent array can no
        // longer be mutated through this transient.
        this->owner_t::operator=(owner_t{});
        return persistent_type{impl_};
    }
    IMMER_NODISCARD persistent_type persistent() &&
    {
        return persistent_type{std::move(impl_)};
    }

private:
    friend persistent_type;

    // Adopts the implementation of a persistent array (used by
    // array::transient()).
    array_transient(impl_t impl)
        : impl_(std::move(impl))
    {}

    impl_t impl_ = impl_t::empty();
};
} // namespace immer

254
third_party/immer/immer/atom.hpp vendored Normal file
View file

@ -0,0 +1,254 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/box.hpp>
#include <immer/refcount/no_refcount_policy.hpp>
#include <atomic>
#include <type_traits>
namespace immer {
namespace detail {
// Atom implementation for reference-counted memory policies: all
// accesses to the stored box are serialized through the refcount
// policy's spinlock so the reference-count updates are race-free.
template <typename T, typename MemoryPolicy>
struct refcount_atom_impl
{
    using box_type      = box<T, MemoryPolicy>;
    using value_type    = T;
    using memory_policy = MemoryPolicy;
    using spinlock_t    = typename MemoryPolicy::refcount::spinlock_type;
    using scoped_lock_t = typename spinlock_t::scoped_lock;

    // Non-copyable and non-movable: the spinlock and the stored box
    // are tied to this object's address.
    refcount_atom_impl(const refcount_atom_impl&) = delete;
    refcount_atom_impl(refcount_atom_impl&&)      = delete;
    refcount_atom_impl& operator=(const refcount_atom_impl&) = delete;
    refcount_atom_impl& operator=(refcount_atom_impl&&) = delete;

    refcount_atom_impl(box_type b)
        : impl_{std::move(b)}
    {}

    // Returns a copy of the current box; the lock makes the copy (and
    // its refcount increment) atomic with respect to store/exchange.
    box_type load() const
    {
        scoped_lock_t lock{lock_};
        return impl_;
    }

    // Replaces the current box.  Since box's move-assignment swaps,
    // the previous value is released when `b` is destroyed, after the
    // lock has been dropped.
    void store(box_type b)
    {
        scoped_lock_t lock{lock_};
        impl_ = std::move(b);
    }

    // Swaps in `b` under the lock and returns the previous value.
    box_type exchange(box_type b)
    {
        {
            scoped_lock_t lock{lock_};
            swap(b, impl_);
        }
        return b;
    }

    // Optimistic update: compute `fn(old)` outside the lock, then
    // re-take the lock and install the result only if the stored
    // value is still `old` (compared by pointer identity); otherwise
    // retry.  `fn` may therefore be evaluated multiple times under
    // contention.
    template <typename Fn>
    box_type update(Fn&& fn)
    {
        while (true) {
            auto oldv = load();
            auto newv = oldv.update(fn);
            {
                scoped_lock_t lock{lock_};
                if (oldv.impl_ == impl_.impl_) {
                    impl_ = newv;
                    return {newv};
                }
            }
        }
    }

private:
    mutable spinlock_t lock_;
    box_type impl_;
};
// Atom implementation for garbage-collected memory policies (no
// reference counting): the box's raw holder pointer is stored
// directly in a std::atomic and updated with compare-exchange.
template <typename T, typename MemoryPolicy>
struct gc_atom_impl
{
    using box_type      = box<T, MemoryPolicy>;
    using value_type    = T;
    using memory_policy = MemoryPolicy;

    // Storing raw pointers without bumping refcounts is only sound
    // when there are no refcounts at all.
    static_assert(std::is_same<typename MemoryPolicy::refcount,
                               no_refcount_policy>::value,
                  "gc_atom_impl can only be used when there is no refcount!");

    // Non-copyable and non-movable (holds a std::atomic).
    gc_atom_impl(const gc_atom_impl&) = delete;
    gc_atom_impl(gc_atom_impl&&)      = delete;
    gc_atom_impl& operator=(const gc_atom_impl&) = delete;
    gc_atom_impl& operator=(gc_atom_impl&&) = delete;

    gc_atom_impl(box_type b)
        : impl_{b.impl_}
    {}

    box_type load() const { return {impl_.load()}; }
    void store(box_type b) { impl_.store(b.impl_); }
    box_type exchange(box_type b) { return {impl_.exchange(b.impl_)}; }

    // Classic CAS loop: recompute `fn(old)` until the exchange
    // succeeds.  `fn` may be evaluated multiple times under
    // contention.
    template <typename Fn>
    box_type update(Fn&& fn)
    {
        while (true) {
            auto oldv = box_type{impl_.load()};
            auto newv = oldv.update(fn);
            if (impl_.compare_exchange_weak(oldv.impl_, newv.impl_))
                return {newv};
        }
    }

private:
    std::atomic<typename box_type::holder*> impl_;
};
} // namespace detail
/*!
* Stores for boxed values of type `T` in a thread-safe manner.
*
* @see box
*
* @rst
*
* .. warning:: If the memory policy used includes thread-unsafe reference
* counting, no thread safety is assumed, and the atom becomes thread
* unsafe too!
*
* .. note:: ``box<T>`` provides a value-based box of type ``T``, that is, we
* can think of it as a value-based version of ``std::shared_ptr``. In a
* similar fashion, ``atom<T>`` is in spirit the value-based equivalent of
* C++20 ``std::atomic_shared_ptr``. However, the API does not follow
* the ``std::atomic`` interface closely, since it attempts to be a higher level
* construction, most similar to Clojure's ``(atom)``. It is remarkable in
* particular that, since the ``box<T>`` underlying object is immutable, using
* ``atom<T>`` is fully thread-safe in ways that ``std::atomic_shared_ptr`` is
* not. This is so because dereferencing the underlying pointer in a
* ``std::atomic_shared_ptr`` may require further synchronization, in
* particular when invoking non-const methods.
*
* @endrst
*/
template <typename T, typename MemoryPolicy = default_memory_policy>
class atom
{
public:
    using box_type      = box<T, MemoryPolicy>;
    using value_type    = T;
    using memory_policy = MemoryPolicy;

    // Atoms are pinned to their address: neither copyable nor movable.
    atom(const atom&) = delete;
    atom(atom&&)      = delete;
    void operator=(const atom&) = delete;
    void operator=(atom&&) = delete;

    /*!
     * Constructs an atom holding a value `v`;
     */
    atom(box_type v = {})
        : impl_{std::move(v)}
    {}

    /*!
     * Sets a new value in the atom.
     */
    atom& operator=(box_type b)
    {
        impl_.store(std::move(b));
        return *this;
    }

    /*!
     * Reads the currently stored value in a thread-safe manner.
     */
    operator box_type() const { return impl_.load(); }

    /*!
     * Reads the currently stored value in a thread-safe manner.
     */
    operator value_type() const { return *impl_.load(); }

    /*!
     * Reads the currently stored value in a thread-safe manner.
     */
    IMMER_NODISCARD box_type load() const { return impl_.load(); }

    /*!
     * Stores a new value in a thread-safe manner.
     */
    void store(box_type b) { impl_.store(std::move(b)); }

    /*!
     * Stores a new value and returns the old value, in a thread-safe manner.
     */
    IMMER_NODISCARD box_type exchange(box_type b)
    {
        return impl_.exchange(std::move(b));
    }

    /*!
     * Stores the result of applying `fn` to the current value atomically and
     * returns the new resulting value.
     *
     * @rst
     *
     * .. warning:: ``fn`` must be a pure function and have no side effects! The
     *    function might be evaluated multiple times when multiple threads
     *    contend to update the value.
     *
     * @endrst
     */
    template <typename Fn>
    box_type update(Fn&& fn)
    {
        return impl_.update(std::forward<Fn>(fn));
    }

private:
    // Metafunction wrappers so the implementation type is only
    // instantiated for the branch selected by std::conditional_t
    // below.
    struct get_refcount_atom_impl
    {
        template <typename U, typename MP>
        struct apply
        {
            using type = detail::refcount_atom_impl<U, MP>;
        };
    };

    struct get_gc_atom_impl
    {
        template <typename U, typename MP>
        struct apply
        {
            using type = detail::gc_atom_impl<U, MP>;
        };
    };

    // If we are using "real" garbage collection (we assume this when we use
    // `no_refcount_policy`), we just store the pointer in an atomic. If we use
    // reference counting, we rely on the reference counting spinlock.
    using impl_t = typename std::conditional_t<
        std::is_same<typename MemoryPolicy::refcount,
                     no_refcount_policy>::value,
        get_gc_atom_impl,
        get_refcount_atom_impl>::template apply<T, MemoryPolicy>::type;

    impl_t impl_;
};
} // namespace immer

194
third_party/immer/immer/box.hpp vendored Normal file
View file

@ -0,0 +1,194 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/util.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
namespace detail {
template <typename U, typename MP>
struct gc_atom_impl;
template <typename U, typename MP>
struct refcount_atom_impl;
} // namespace detail
/*!
* Immutable box for a single value of type `T`.
*
* The box is always copyable and movable. The `T` copy or move
* operations are never called. Since a box is immutable, copying or
* moving just copy the underlying pointers.
*/
template <typename T, typename MemoryPolicy = default_memory_policy>
class box
{
    // The atom implementations poke at impl_ directly to store the
    // raw pointer in an atomic / compare it for identity.
    friend struct detail::gc_atom_impl<T, MemoryPolicy>;
    friend struct detail::refcount_atom_impl<T, MemoryPolicy>;

    // Heap cell: the stored value together with the reference count
    // contributed by the memory policy's refcount base class.
    struct holder : MemoryPolicy::refcount
    {
        T value;

        template <typename... Args>
        holder(Args&&... args)
            : value{std::forward<Args>(args)...}
        {}
    };

    using heap = typename MemoryPolicy::heap::type;

    holder* impl_ = nullptr;

    // Adopts `impl` without incrementing its reference count.
    box(holder* impl)
        : impl_{impl}
    {}

public:
    using value_type    = T;
    using memory_policy = MemoryPolicy;

    /*!
     * Constructs a box holding `T{}`.
     */
    box()
        : impl_{detail::make<heap, holder>()}
    {}

    /*!
     * Constructs a box holding `T{arg}`
     */
    template <typename Arg,
              typename Enable = std::enable_if_t<
                  !std::is_same<box, std::decay_t<Arg>>::value &&
                  std::is_constructible<T, Arg>::value>>
    box(Arg&& arg)
        : impl_{detail::make<heap, holder>(std::forward<Arg>(arg))}
    {}

    /*!
     * Constructs a box holding `T{arg1, arg2, args...}`
     */
    template <typename Arg1, typename Arg2, typename... Args>
    box(Arg1&& arg1, Arg2&& arg2, Args&&... args)
        : impl_{detail::make<heap, holder>(std::forward<Arg1>(arg1),
                                           std::forward<Arg2>(arg2),
                                           std::forward<Args>(args)...)}
    {}

    friend void swap(box& a, box& b)
    {
        using std::swap;
        swap(a.impl_, b.impl_);
    }

    // Move leaves `other` with a null impl_: safe to destroy or
    // assign to, but not to dereference.
    box(box&& other) { swap(*this, other); }

    // Copies share the holder and bump its reference count.
    box(const box& other)
        : impl_(other.impl_)
    {
        impl_->inc();
    }

    // Swap-based move assignment: our previous holder is released
    // when `other` is destroyed.
    box& operator=(box&& other)
    {
        swap(*this, other);
        return *this;
    }

    // Copy-and-swap: the previous holder is released via `aux`'s
    // destructor.
    box& operator=(const box& other)
    {
        auto aux = other;
        swap(*this, aux);
        return *this;
    }

    ~box()
    {
        // impl_ may be null after being moved from.
        if (impl_ && impl_->dec()) {
            impl_->~holder();
            heap::deallocate(sizeof(holder), impl_);
        }
    }

    /*! Query the current value. */
    IMMER_NODISCARD const T& get() const { return impl_->value; }

    /*! Conversion to the boxed type. */
    operator const T&() const { return get(); }

    /*! Access via dereference */
    const T& operator*() const { return get(); }

    /*! Access via pointer member access */
    const T* operator->() const { return &get(); }

    /*! Comparison.  Fast path: two boxes sharing the same holder are
     * trivially equal without comparing values. */
    IMMER_NODISCARD bool operator==(detail::exact_t<const box&> other) const
    {
        return impl_ == other.value.impl_ || get() == other.value.get();
    }
    // Note that the `exact_t` disambiguates comparisons against `T{}`
    // directly. In that case we want to use `operator T&` and
    // compare directly. We definitely never want to convert a value
    // to a box (which causes an allocation) just to compare it.
    IMMER_NODISCARD bool operator!=(detail::exact_t<const box&> other) const
    {
        return !(*this == other.value);
    }
    IMMER_NODISCARD bool operator<(detail::exact_t<const box&> other) const
    {
        return get() < other.value.get();
    }

    /*!
     * Returns a new box built by applying the `fn` to the underlying
     * value.
     *
     * @rst
     *
     * **Example**
     *   .. literalinclude:: ../example/box/box.cpp
     *      :language: c++
     *      :dedent: 8
     *      :start-after: update/start
     *      :end-before:  update/end
     *
     * @endrst
     */
    template <typename Fn>
    IMMER_NODISCARD box update(Fn&& fn) const&
    {
        return std::forward<Fn>(fn)(get());
    }

    // R-value overload: when we are the sole owner of the holder,
    // mutate the value in place instead of allocating a new one.
    template <typename Fn>
    IMMER_NODISCARD box&& update(Fn&& fn) &&
    {
        if (impl_->unique())
            impl_->value = std::forward<Fn>(fn)(std::move(impl_->value));
        else
            *this = std::forward<Fn>(fn)(impl_->value);
        return std::move(*this);
    }
};
} // namespace immer
namespace std {
// Hashes a box by hashing the value it holds, so a box is usable as a
// key in standard unordered containers whenever `T` is.
template <typename T, typename MP>
struct hash<immer::box<T, MP>>
{
    std::size_t operator()(const immer::box<T, MP>& boxed) const
    {
        const std::hash<T> value_hasher{};
        return value_hasher(boxed.get());
    }
};
} // namespace std

93
third_party/immer/immer/config.hpp vendored Normal file
View file

@ -0,0 +1,93 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once

// IMMER_NODISCARD -- expands to [[nodiscard]] where the attribute is
// available: detected via __has_cpp_attribute when the compiler
// supports that macro, otherwise via MSVC's _MSVC_LANG in C++17 mode.
#if defined(__has_cpp_attribute)
#if __has_cpp_attribute(nodiscard)
#define IMMER_NODISCARD [[nodiscard]]
#endif
#else
#if _MSVC_LANG >= 201703L
#define IMMER_NODISCARD [[nodiscard]]
#endif
#endif
// Fallback so the macro can always be used unconditionally.
#ifndef IMMER_NODISCARD
#define IMMER_NODISCARD
#endif

// IMMER_TAGGED_NODE -- enabled by default in debug builds (no
// NDEBUG); gates the IMMER_ASSERT_TAGGED checks below.
#ifndef IMMER_TAGGED_NODE
#ifdef NDEBUG
#define IMMER_TAGGED_NODE 0
#else
#define IMMER_TAGGED_NODE 1
#endif
#endif

#if IMMER_TAGGED_NODE
#define IMMER_ASSERT_TAGGED(assertion) assert(assertion)
#else
#define IMMER_ASSERT_TAGGED(assertion)
#endif

// Debugging toggles; all off unless overridden by the build.
#ifndef IMMER_DEBUG_TRACES
#define IMMER_DEBUG_TRACES 0
#endif

#ifndef IMMER_DEBUG_PRINT
#define IMMER_DEBUG_PRINT 0
#endif

#ifndef IMMER_DEBUG_DEEP_CHECK
#define IMMER_DEBUG_DEEP_CHECK 0
#endif

#if IMMER_DEBUG_TRACES || IMMER_DEBUG_PRINT
#include <iostream>
#include <prettyprint.hpp>
#endif

// Tracing macros; they compile to nothing unless IMMER_DEBUG_TRACES
// is enabled.
#if IMMER_DEBUG_TRACES
#define IMMER_TRACE(...) std::cout << __VA_ARGS__ << std::endl
#else
#define IMMER_TRACE(...)
#endif
#define IMMER_TRACE_F(...) \
    IMMER_TRACE(__FILE__ << ":" << __LINE__ << ": " << __VA_ARGS__)
#define IMMER_TRACE_E(expr) IMMER_TRACE(" " << #expr << " = " << (expr))

// Compiler-specific intrinsics, with conservative fallbacks for MSVC
// (which lacks __builtin_expect / __builtin_unreachable).
#if defined(_MSC_VER)
#define IMMER_UNREACHABLE __assume(false)
#define IMMER_LIKELY(cond) cond
#define IMMER_UNLIKELY(cond) cond
#define IMMER_FORCEINLINE __forceinline
#define IMMER_PREFETCH(p)
#else
#define IMMER_UNREACHABLE __builtin_unreachable()
#define IMMER_LIKELY(cond) __builtin_expect(!!(cond), 1)
#define IMMER_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#define IMMER_FORCEINLINE inline __attribute__((always_inline))
#define IMMER_PREFETCH(p)
// #define IMMER_PREFETCH(p) __builtin_prefetch(p)
#endif

#define IMMER_DESCENT_DEEP 0

// Track allocation sizes with a debug heap in non-release builds.
#ifdef NDEBUG
#define IMMER_ENABLE_DEBUG_SIZE_HEAP 0
#else
#define IMMER_ENABLE_DEBUG_SIZE_HEAP 1
#endif

namespace immer {

// NOTE(review): presumably 2^default_bits is the branching factor of
// the tree-based containers -- confirm against the rbts detail code.
const auto default_bits           = 5;
const auto default_free_list_size = 1 << 10;

} // namespace immer

View file

@ -0,0 +1,203 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/algorithm.hpp>
#include <immer/detail/arrays/node.hpp>
namespace immer {
namespace detail {
namespace arrays {
/*!
 * Implementation of an immutable array as a single reference-counted
 * buffer node plus an element count.  There is no slack capacity:
 * every update allocates a buffer of exactly the right size.
 *
 * Fix: `data_mut` used to overwrite `ptr` with the fresh copy without
 * releasing the reference held on the old node, leaking one reference
 * (and eventually the old buffer).  It now mirrors
 * `with_capacity::data_mut` and calls `dec()` before the swap.
 */
template <typename T, typename MemoryPolicy>
struct no_capacity
{
    using node_t = node<T, MemoryPolicy>;
    using edit_t = typename MemoryPolicy::transience_t::edit;
    using size_t = std::size_t;

    node_t* ptr; // reference-counted buffer
    size_t size; // number of constructed elements in ptr

    //! Canonical empty array, shared by every empty instance.
    static const no_capacity& empty()
    {
        static const no_capacity empty_{
            node_t::make_n(0),
            0,
        };
        return empty_;
    }

    //! Takes ownership of one reference on `p`.
    no_capacity(node_t* p, size_t s)
        : ptr{p}
        , size{s}
    {}

    no_capacity(const no_capacity& other)
        : no_capacity{other.ptr, other.size}
    {
        inc();
    }

    no_capacity(no_capacity&& other)
        : no_capacity{empty()}
    {
        swap(*this, other);
    }

    //! Copy-and-swap keeps assignment exception safe.
    no_capacity& operator=(const no_capacity& other)
    {
        auto next = other;
        swap(*this, next);
        return *this;
    }

    no_capacity& operator=(no_capacity&& other)
    {
        swap(*this, other);
        return *this;
    }

    friend void swap(no_capacity& x, no_capacity& y)
    {
        using std::swap;
        swap(x.ptr, y.ptr);
        swap(x.size, y.size);
    }

    ~no_capacity() { dec(); }

    void inc()
    {
        using immer::detail::get;
        ptr->refs().inc();
    }

    //! Drops one reference, destroying the buffer when it was the last.
    void dec()
    {
        using immer::detail::get;
        if (ptr->refs().dec())
            node_t::delete_n(ptr, size, size);
    }

    T* data() { return ptr->data(); }
    const T* data() const { return ptr->data(); }

    //! Returns a mutable pointer to the data, copying the buffer first
    //! unless it can already be mutated under the transient edit `e`.
    T* data_mut(edit_t e)
    {
        if (!ptr->can_mutate(e)) {
            // Copy first, then release our reference on the old node.
            // (Previously the old node was overwritten without dec(),
            // leaking one reference per call.)
            auto p = node_t::copy_e(e, size, ptr, size);
            dec();
            ptr = p;
        }
        return data();
    }

    template <typename Iter,
              typename Sent,
              std::enable_if_t<is_forward_iterator_v<Iter> &&
                                   compatible_sentinel_v<Iter, Sent>,
                               bool> = true>
    static no_capacity from_range(Iter first, Sent last)
    {
        auto count = static_cast<size_t>(distance(first, last));
        if (count == 0)
            return empty();
        else
            return {
                node_t::copy_n(count, first, last),
                count,
            };
    }

    static no_capacity from_fill(size_t n, T v)
    {
        return {node_t::fill_n(n, v), n};
    }

    template <typename U>
    static no_capacity from_initializer_list(std::initializer_list<U> values)
    {
        using namespace std;
        return from_range(begin(values), end(values));
    }

    //! The whole array is one contiguous chunk.
    template <typename Fn>
    void for_each_chunk(Fn&& fn) const
    {
        std::forward<Fn>(fn)(data(), data() + size);
    }

    template <typename Fn>
    bool for_each_chunk_p(Fn&& fn) const
    {
        return std::forward<Fn>(fn)(data(), data() + size);
    }

    const T& get(std::size_t index) const { return data()[index]; }

    const T& get_check(std::size_t index) const
    {
        if (index >= size)
            throw std::out_of_range{"out of range"};
        return data()[index];
    }

    bool equals(const no_capacity& other) const
    {
        return ptr == other.ptr ||
               (size == other.size &&
                std::equal(data(), data() + size, other.data()));
    }

    //! Persistent append; the catch block releases the new buffer when
    //! constructing the appended element throws.
    no_capacity push_back(T value) const
    {
        auto p = node_t::copy_n(size + 1, ptr, size);
        try {
            new (p->data() + size) T{std::move(value)};
            return {p, size + 1};
        } catch (...) {
            node_t::delete_n(p, size, size + 1);
            throw;
        }
    }

    //! Persistent update of the element at `idx`.
    no_capacity assoc(std::size_t idx, T value) const
    {
        auto p = node_t::copy_n(size, ptr, size);
        try {
            p->data()[idx] = std::move(value);
            return {p, size};
        } catch (...) {
            node_t::delete_n(p, size, size);
            throw;
        }
    }

    //! Persistent update via `op(old_value)`.
    template <typename Fn>
    no_capacity update(std::size_t idx, Fn&& op) const
    {
        auto p = node_t::copy_n(size, ptr, size);
        try {
            auto& elem = p->data()[idx];
            elem = std::forward<Fn>(op)(std::move(elem));
            return {p, size};
        } catch (...) {
            node_t::delete_n(p, size, size);
            throw;
        }
    }

    //! Persistent truncation to the first `sz` elements.
    no_capacity take(std::size_t sz) const
    {
        auto p = node_t::copy_n(sz, ptr, sz);
        return {p, sz};
    }
};
} // namespace arrays
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,127 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/combine_standard_layout.hpp>
#include <immer/detail/type_traits.hpp>
#include <immer/detail/util.hpp>
#include <limits>
namespace immer {
namespace detail {
namespace arrays {
/*!
 * Heap-allocated node holding the elements of an array together with
 * the reference count and transient-ownership metadata demanded by the
 * memory policy.  The element buffer lives inline past the header,
 * which is why `sizeof_n` computes allocation sizes.
 */
template <typename T, typename MemoryPolicy>
struct node
{
    using memory = MemoryPolicy;
    using heap = typename MemoryPolicy::heap::type;
    using transience = typename memory::transience_t;
    using refs_t = typename memory::refcount;
    using ownee_t = typename transience::ownee;
    using node_t = node;
    using edit_t = typename transience::edit;

    struct data_t
    {
        aligned_storage_for<T> buffer;
    };

    // Standard-layout combination of the payload with the (possibly
    // empty) refcount/ownership policies, so immer_offsetof is valid.
    using impl_t = combine_standard_layout_t<data_t, refs_t, ownee_t>;

    impl_t impl;

    // Bytes needed for a node of `count` elements; at least one element
    // is accounted for so even the empty node has a valid allocation.
    constexpr static std::size_t sizeof_n(size_t count)
    {
        return immer_offsetof(impl_t, d.buffer) +
               sizeof(T) * (count == 0 ? 1 : count);
    }

    refs_t& refs() const { return auto_const_cast(get<refs_t>(impl)); }

    const ownee_t& ownee() const { return get<ownee_t>(impl); }
    ownee_t& ownee() { return get<ownee_t>(impl); }

    const T* data() const { return reinterpret_cast<const T*>(&impl.d.buffer); }
    T* data() { return reinterpret_cast<T*>(&impl.d.buffer); }

    // A node may be mutated in place when uniquely referenced or when
    // owned by the same transient edit `e`.
    bool can_mutate(edit_t e) const
    {
        return refs().unique() || ownee().can_mutate(e);
    }

    // Destroys `sz` constructed elements and releases the storage of a
    // node that was allocated with capacity `cap`.
    static void delete_n(node_t* p, size_t sz, size_t cap)
    {
        destroy_n(p->data(), sz);
        heap::deallocate(sizeof_n(cap), p);
    }

    static node_t* make_n(size_t n)
    {
        return new (heap::allocate(sizeof_n(n))) node_t{};
    }

    // Like make_n, but the node is marked as owned by transient `e`.
    static node_t* make_e(edit_t e, size_t n)
    {
        auto p = make_n(n);
        p->ownee() = e;
        return p;
    }

    // The catch blocks below release the node when element construction
    // throws, so no storage is leaked.
    static node_t* fill_n(size_t n, T v)
    {
        auto p = make_n(n);
        try {
            std::uninitialized_fill_n(p->data(), n, v);
            return p;
        } catch (...) {
            heap::deallocate(sizeof_n(n), p);
            throw;
        }
    }

    template <typename Iter,
              typename Sent,
              std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
                               bool> = true>
    static node_t* copy_n(size_t n, Iter first, Sent last)
    {
        auto p = make_n(n);
        try {
            uninitialized_copy(first, last, p->data());
            return p;
        } catch (...) {
            heap::deallocate(sizeof_n(n), p);
            throw;
        }
    }

    // Copies the first `count` elements of `p` into a node of capacity
    // `n` (n >= count).
    static node_t* copy_n(size_t n, node_t* p, size_t count)
    {
        return copy_n(n, p->data(), p->data() + count);
    }

    // copy_e variants additionally mark the new node as owned by the
    // transient edit `e`.
    template <typename Iter>
    static node_t* copy_e(edit_t e, size_t n, Iter first, Iter last)
    {
        auto p = copy_n(n, first, last);
        p->ownee() = e;
        return p;
    }

    static node_t* copy_e(edit_t e, size_t n, node_t* p, size_t count)
    {
        return copy_e(e, n, p->data(), p->data() + count);
    }
};
} // namespace arrays
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,303 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/arrays/no_capacity.hpp>
namespace immer {
namespace detail {
namespace arrays {
/*!
 * Implementation of an immutable array that keeps slack capacity, so
 * transient (mutable) operations can grow it with amortized O(1)
 * appends, like a `std::vector`.
 *
 * Fix: `take_mut` destroyed the wrong range.  It called
 * `destroy_n(data() + size, size - sz)`, i.e. it started *past the
 * last constructed element*, destroying uninitialized storage
 * (undefined behavior) while leaking the truncated elements
 * `[sz, size)`.  The start pointer is now `data() + sz`.
 */
template <typename T, typename MemoryPolicy>
struct with_capacity
{
    using no_capacity_t = no_capacity<T, MemoryPolicy>;

    using node_t = node<T, MemoryPolicy>;
    using edit_t = typename MemoryPolicy::transience_t::edit;
    using size_t = std::size_t;

    node_t* ptr;     // reference-counted buffer
    size_t size;     // number of constructed elements
    size_t capacity; // number of elements the buffer can hold

    static const with_capacity& empty()
    {
        static const with_capacity empty_{node_t::make_n(1), 0, 1};
        return empty_;
    }

    //! Takes ownership of one reference on `p`.
    with_capacity(node_t* p, size_t s, size_t c)
        : ptr{p}
        , size{s}
        , capacity{c}
    {}

    with_capacity(const with_capacity& other)
        : with_capacity{other.ptr, other.size, other.capacity}
    {
        inc();
    }

    //! A no_capacity array can share its buffer: it is exactly full.
    with_capacity(const no_capacity_t& other)
        : with_capacity{other.ptr, other.size, other.size}
    {
        inc();
    }

    with_capacity(with_capacity&& other)
        : with_capacity{empty()}
    {
        swap(*this, other);
    }

    //! Copy-and-swap keeps assignment exception safe.
    with_capacity& operator=(const with_capacity& other)
    {
        auto next = other;
        swap(*this, next);
        return *this;
    }

    with_capacity& operator=(with_capacity&& other)
    {
        swap(*this, other);
        return *this;
    }

    friend void swap(with_capacity& x, with_capacity& y)
    {
        using std::swap;
        swap(x.ptr, y.ptr);
        swap(x.size, y.size);
        swap(x.capacity, y.capacity);
    }

    ~with_capacity() { dec(); }

    void inc()
    {
        using immer::detail::get;
        ptr->refs().inc();
    }

    //! Drops one reference, destroying the buffer when it was the last.
    void dec()
    {
        using immer::detail::get;
        if (ptr->refs().dec())
            node_t::delete_n(ptr, size, capacity);
    }

    const T* data() const { return ptr->data(); }
    T* data() { return ptr->data(); }

    //! Returns a mutable pointer to the data, copying the buffer first
    //! unless it can already be mutated under the transient edit `e`.
    T* data_mut(edit_t e)
    {
        if (!ptr->can_mutate(e)) {
            auto p = node_t::copy_e(e, capacity, ptr, size);
            dec();
            ptr = p;
        }
        return data();
    }

    //! Conversion to the exact-size representation; shares the buffer
    //! when it is already full, otherwise copies to a tight buffer.
    operator no_capacity_t() const
    {
        if (size == capacity) {
            ptr->refs().inc();
            return {ptr, size};
        } else {
            return {node_t::copy_n(size, ptr, size), size};
        }
    }

    template <typename Iter,
              typename Sent,
              std::enable_if_t<is_forward_iterator_v<Iter> &&
                                   compatible_sentinel_v<Iter, Sent>,
                               bool> = true>
    static with_capacity from_range(Iter first, Sent last)
    {
        auto count = static_cast<size_t>(distance(first, last));
        if (count == 0)
            return empty();
        else
            return {node_t::copy_n(count, first, last), count, count};
    }

    template <typename U>
    static with_capacity from_initializer_list(std::initializer_list<U> values)
    {
        using namespace std;
        return from_range(begin(values), end(values));
    }

    static with_capacity from_fill(size_t n, T v)
    {
        return {node_t::fill_n(n, v), n, n};
    }

    //! The whole array is one contiguous chunk.
    template <typename Fn>
    void for_each_chunk(Fn&& fn) const
    {
        std::forward<Fn>(fn)(data(), data() + size);
    }

    template <typename Fn>
    bool for_each_chunk_p(Fn&& fn) const
    {
        return std::forward<Fn>(fn)(data(), data() + size);
    }

    const T& get(std::size_t index) const { return data()[index]; }

    const T& get_check(std::size_t index) const
    {
        if (index >= size)
            throw std::out_of_range{"out of range"};
        return data()[index];
    }

    bool equals(const with_capacity& other) const
    {
        return ptr == other.ptr ||
               (size == other.size &&
                std::equal(data(), data() + size, other.data()));
    }

    //! Growth policy: at least double the capacity, saturating near the
    //! maximum representable size to avoid overflow.
    static size_t recommend_up(size_t sz, size_t cap)
    {
        auto max = std::numeric_limits<size_t>::max();
        return sz <= cap ? cap
               : cap >= max / 2 ? max
               /* otherwise */ : std::max(2 * cap, sz);
    }

    //! Shrink policy: keep the buffer unless it is under half full.
    static size_t recommend_down(size_t sz, size_t cap)
    {
        return sz == 0 ? 1
               : sz < cap / 2 ? sz * 2 :
               /* otherwise */ cap;
    }

    //! Persistent append; releases the new buffer if constructing the
    //! appended element throws.
    with_capacity push_back(T value) const
    {
        auto cap = recommend_up(size + 1, capacity);
        auto p = node_t::copy_n(cap, ptr, size);
        try {
            new (p->data() + size) T{std::move(value)};
            return {p, size + 1, cap};
        } catch (...) {
            node_t::delete_n(p, size, cap);
            throw;
        }
    }

    //! Transient append: in place when we own the buffer and there is
    //! room, otherwise reallocate under ownership of `e`.
    void push_back_mut(edit_t e, T value)
    {
        if (ptr->can_mutate(e) && capacity > size) {
            new (data() + size) T{std::move(value)};
            ++size;
        } else {
            auto cap = recommend_up(size + 1, capacity);
            auto p = node_t::copy_e(e, cap, ptr, size);
            try {
                new (p->data() + size) T{std::move(value)};
                *this = {p, size + 1, cap};
            } catch (...) {
                node_t::delete_n(p, size, cap);
                throw;
            }
        }
    }

    //! Persistent update of the element at `idx`.
    with_capacity assoc(std::size_t idx, T value) const
    {
        auto p = node_t::copy_n(capacity, ptr, size);
        try {
            p->data()[idx] = std::move(value);
            return {p, size, capacity};
        } catch (...) {
            node_t::delete_n(p, size, capacity);
            throw;
        }
    }

    //! Transient update of the element at `idx`.
    void assoc_mut(edit_t e, std::size_t idx, T value)
    {
        if (ptr->can_mutate(e)) {
            data()[idx] = std::move(value);
        } else {
            auto p = node_t::copy_n(capacity, ptr, size);
            try {
                p->data()[idx] = std::move(value);
                *this = {p, size, capacity};
            } catch (...) {
                node_t::delete_n(p, size, capacity);
                throw;
            }
        }
    }

    //! Persistent update via `op(old_value)`.
    template <typename Fn>
    with_capacity update(std::size_t idx, Fn&& op) const
    {
        auto p = node_t::copy_n(capacity, ptr, size);
        try {
            auto& elem = p->data()[idx];
            elem = std::forward<Fn>(op)(std::move(elem));
            return {p, size, capacity};
        } catch (...) {
            node_t::delete_n(p, size, capacity);
            throw;
        }
    }

    //! Transient update via `op(old_value)`.
    template <typename Fn>
    void update_mut(edit_t e, std::size_t idx, Fn&& op)
    {
        if (ptr->can_mutate(e)) {
            auto& elem = data()[idx];
            elem = std::forward<Fn>(op)(std::move(elem));
        } else {
            auto p = node_t::copy_e(e, capacity, ptr, size);
            try {
                auto& elem = p->data()[idx];
                elem = std::forward<Fn>(op)(std::move(elem));
                *this = {p, size, capacity};
            } catch (...) {
                node_t::delete_n(p, size, capacity);
                throw;
            }
        }
    }

    //! Persistent truncation to the first `sz` elements.
    with_capacity take(std::size_t sz) const
    {
        auto cap = recommend_down(sz, capacity);
        auto p = node_t::copy_n(cap, ptr, sz);
        return {p, sz, cap};
    }

    //! Transient truncation: in place when we own the buffer, otherwise
    //! copy the first `sz` elements into a fresh node owned by `e`.
    void take_mut(edit_t e, std::size_t sz)
    {
        if (ptr->can_mutate(e)) {
            // Destroy the dropped tail [sz, size).  This used to start
            // at data() + size, destroying uninitialized storage past
            // the end and leaking the truncated elements.
            destroy_n(data() + sz, size - sz);
            size = sz;
        } else {
            auto cap = recommend_down(sz, capacity);
            auto p = node_t::copy_e(e, cap, ptr, sz);
            *this = {p, sz, cap};
        }
    }
};
} // namespace arrays
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,235 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <type_traits>
// GCC 7.1 has a regression in standard-layout detection when empty
// bases are involved, so we fall back to a manual offsetof-like macro
// there and skip the static_assert at the bottom of this file.
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ == 1
#define IMMER_BROKEN_STANDARD_LAYOUT_DETECTION 1
#define immer_offsetof(st, m) ((std::size_t) & (((st*) 0)->m))
#else
#define IMMER_BROKEN_STANDARD_LAYOUT_DETECTION 0
#define immer_offsetof offsetof
#endif

namespace immer {
namespace detail {

//
// Metafunction that returns a standard layout struct that combines
// all the standard layout types in `Ts...`, while making sure that
// empty base optimizations are used.
//
// To query a part of the type do `get<my_part>(x)`;
//
// This is useful when putting together a type that merges various
// types coming from different policies.  Some of them might be empty,
// so we shall enable empty base optimizations.  But if we just
// inherit from all of them, we would break the "standard layout"
// rules, preventing us from using `offsetof(...)`.  So this
// metafunction will generate the type by sometimes inheriting,
// sometimes adding as member.
//
// Note that the types are added to the combined type from right to
// left!
//
template <typename... Ts>
struct combine_standard_layout;

template <typename... Ts>
using combine_standard_layout_t = typename combine_standard_layout<Ts...>::type;

namespace csl {

// Tag type used to select the right `get_` overload for a part.
template <typename T>
struct type_t
{};

template <typename U, typename T>
U& get(T& x);
template <typename U, typename T>
const U& get(const T& x);

// Combinator used when `T` is empty: inherit from it (enabling the
// empty base optimization) and from the already-combined tail `Next`.
template <typename T, typename Next = void>
struct inherit
{
    struct type
        : T
        , Next
    {
        // Bring the tail's get_ overloads into scope so lookup can
        // reach every part of the chain.
        using Next::get_;

        template <typename U>
        friend decltype(auto) get(type& x)
        {
            return x.get_(type_t<U>{});
        }
        template <typename U>
        friend decltype(auto) get(const type& x)
        {
            return x.get_(type_t<U>{});
        }

        T& get_(type_t<T>) { return *this; }
        const T& get_(type_t<T>) const { return *this; }
    };
};

// Terminal case: `T` is the last (empty) part of the chain.
template <typename T>
struct inherit<T, void>
{
    struct type : T
    {
        template <typename U>
        friend decltype(auto) get(type& x)
        {
            return x.get_(type_t<U>{});
        }
        template <typename U>
        friend decltype(auto) get(const type& x)
        {
            return x.get_(type_t<U>{});
        }

        T& get_(type_t<T>) { return *this; }
        const T& get_(type_t<T>) const { return *this; }
    };
};

// Combinator used when `T` is non-empty but the tail is empty: store
// the data as member `d` and inherit from the (empty) tail.
template <typename T, typename Next = void>
struct member
{
    struct type : Next
    {
        T d;

        using Next::get_;

        template <typename U>
        friend decltype(auto) get(type& x)
        {
            return x.get_(type_t<U>{});
        }
        template <typename U>
        friend decltype(auto) get(const type& x)
        {
            return x.get_(type_t<U>{});
        }

        T& get_(type_t<T>) { return d; }
        const T& get_(type_t<T>) const { return d; }
    };
};

// Terminal case: `T` is the last (non-empty) part of the chain.
template <typename T>
struct member<T, void>
{
    struct type
    {
        T d;

        template <typename U>
        friend decltype(auto) get(type& x)
        {
            return x.get_(type_t<U>{});
        }
        template <typename U>
        friend decltype(auto) get(const type& x)
        {
            return x.get_(type_t<U>{});
        }

        T& get_(type_t<T>) { return d; }
        const T& get_(type_t<T>) const { return d; }
    };
};

// Combinator used when both `T` and the tail are non-empty: store both
// as members (inheriting would break standard layout), forwarding
// unmatched get_ calls to the nested tail `n`.
template <typename T, typename Next>
struct member_two
{
    struct type
    {
        Next n;
        T d;

        template <typename U>
        friend decltype(auto) get(type& x)
        {
            return x.get_(type_t<U>{});
        }
        template <typename U>
        friend decltype(auto) get(const type& x)
        {
            return x.get_(type_t<U>{});
        }

        T& get_(type_t<T>) { return d; }
        const T& get_(type_t<T>) const { return d; }

        template <typename U>
        auto get_(type_t<U> t) -> decltype(auto)
        {
            return n.get_(t);
        }
        template <typename U>
        auto get_(type_t<U> t) const -> decltype(auto)
        {
            return n.get_(t);
        }
    };
};

template <typename... Ts>
struct combine_standard_layout_aux;

// Base case: a single part is inherited when empty, stored otherwise.
template <typename T>
struct combine_standard_layout_aux<T>
{
    static_assert(std::is_standard_layout<T>::value, "");

    using type = typename std::conditional_t<std::is_empty<T>::value,
                                             csl::inherit<T>,
                                             csl::member<T>>::type;
};

// Recursive case: choose the combinator from the emptiness of the
// current part and of the already-combined tail.
template <typename T, typename... Ts>
struct combine_standard_layout_aux<T, Ts...>
{
    static_assert(std::is_standard_layout<T>::value, "");

    using this_t = T;
    using next_t = typename combine_standard_layout_aux<Ts...>::type;

    static constexpr auto empty_this = std::is_empty<this_t>::value;
    static constexpr auto empty_next = std::is_empty<next_t>::value;

    using type = typename std::conditional_t<
        empty_this,
        inherit<this_t, next_t>,
        std::conditional_t<empty_next,
                           member<this_t, next_t>,
                           member_two<this_t, next_t>>>::type;
};

} // namespace csl

using csl::get;

template <typename... Ts>
struct combine_standard_layout
{
    using type = typename csl::combine_standard_layout_aux<Ts...>::type;
#if !IMMER_BROKEN_STANDARD_LAYOUT_DETECTION
    static_assert(std::is_standard_layout<type>::value, "");
#endif
};

} // namespace detail
} // namespace immer

View file

@ -0,0 +1,108 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cstdint>
#if defined(_MSC_VER)
#include <intrin.h> // __popcnt
#endif
namespace immer {
namespace detail {
namespace hamts {
// Basic scalar types used throughout the CHAMP implementation.
using size_t = std::size_t;
using hash_t = std::size_t;
using bits_t = std::uint32_t;
using count_t = std::uint32_t;
using shift_t = std::uint32_t;

// Bitmap type wide enough to hold one bit per child, i.e. 2^B bits.
template <bits_t B>
struct get_bitmap_type
{
    static_assert(B < 6u, "B > 6 is not supported.");
    using type = std::uint32_t;
};

template <>
struct get_bitmap_type<6u>
{
    using type = std::uint64_t;
};

// Number of children per node (2^B).
template <bits_t B, typename T = count_t>
constexpr T branches = T{1u} << B;

// Mask extracting the B low bits of a hash.
template <bits_t B, typename T = size_t>
constexpr T mask = branches<B, T> - 1u;

// Levels needed to consume a full hash, rounding up.
template <bits_t B, typename T = count_t>
constexpr T max_depth = (sizeof(hash_t) * 8u + B - 1u) / B;

// Shift at which the hash is exhausted and collision nodes begin.
template <bits_t B, typename T = count_t>
constexpr T max_shift = max_depth<B, count_t>* B;

// Unconditionally use the compiler intrinsic for popcount; the
// portable fallbacks below are kept for reference.
#define IMMER_HAS_BUILTIN_POPCOUNT 1
/*!
 * Portable population count for 32-bit values, used when no compiler
 * intrinsic is available.  Classic SWAR reduction; see also:
 * https://en.wikipedia.org/wiki/Hamming_weight
 * http://wm.ite.pl/articles/sse-popcount.html
 * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
 */
inline auto popcount_fallback(std::uint32_t x)
{
    // Sum adjacent bit pairs, then nibbles, then let the multiply
    // accumulate every byte count into the most significant byte.
    auto pairs   = x - ((x >> 1) & 0x55555555u);
    auto nibbles = (pairs & 0x33333333u) + ((pairs >> 2) & 0x33333333u);
    auto bytes   = (nibbles + (nibbles >> 4u)) & 0xF0F0F0Fu;
    return (bytes * 0x1010101u) >> 24u;
}
/*!
 * Portable population count for 64-bit values: the same SWAR
 * reduction as the 32-bit overload, with widened masks and a final
 * shift that extracts the count from the top byte.
 */
inline auto popcount_fallback(std::uint64_t x)
{
    auto pairs   = x - ((x >> 1) & 0x5555555555555555u);
    auto nibbles = (pairs & 0x3333333333333333u) +
                   ((pairs >> 2u) & 0x3333333333333333u);
    auto bytes   = (nibbles + (nibbles >> 4)) & 0x0F0F0F0F0F0F0F0Fu;
    return (bytes * 0x0101010101010101u) >> 56u;
}
// Population count dispatch for 32-bit values: use the compiler
// intrinsic when available (always, given IMMER_HAS_BUILTIN_POPCOUNT
// is defined to 1 above), otherwise the portable fallback.
inline count_t popcount(std::uint32_t x)
{
#if IMMER_HAS_BUILTIN_POPCOUNT
#if defined(_MSC_VER)
    return __popcnt(x);
#else
    return __builtin_popcount(x);
#endif
#else
    return popcount_fallback(x);
#endif
}
// Population count dispatch for 64-bit values.  On 32-bit MSVC there
// is no 64-bit intrinsic, so the two halves are counted separately.
inline count_t popcount(std::uint64_t x)
{
#if IMMER_HAS_BUILTIN_POPCOUNT
#if defined(_MSC_VER)
#if defined(_WIN64)
    return __popcnt64(x);
#else
    // TODO: benchmark against popcount_fallback(std::uint64_t x)
    return popcount(static_cast<std::uint32_t>(x >> 32)) +
           popcount(static_cast<std::uint32_t>(x));
#endif
#else
    return __builtin_popcountll(x);
#endif
#else
    return popcount_fallback(x);
#endif
}
} // namespace hamts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,473 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <immer/detail/hamts/node.hpp>
#include <algorithm>
namespace immer {
namespace detail {
namespace hamts {
/*!
 * CHAMP (Compressed Hash-Array Mapped Prefix-tree), the persistent
 * data structure underlying immer's sets and maps.  `B` bits of the
 * hash are consumed per level; `datamap`/`nodemap` bitmaps say which
 * slots hold inline values and which hold child nodes, stored
 * compactly.  Keys whose hash is fully consumed land in flat
 * "collision" nodes.
 */
template <typename T,
          typename Hash,
          typename Equal,
          typename MemoryPolicy,
          bits_t B>
struct champ
{
    static constexpr auto bits = B;

    using node_t = node<T, Hash, Equal, MemoryPolicy, B>;
    using bitmap_t = typename get_bitmap_type<B>::type;

    static_assert(branches<B> <= sizeof(bitmap_t) * 8, "");

    node_t* root; // reference-counted trie root
    size_t size;  // cached number of elements

    // Canonical empty tree, shared by every empty instance.
    static const champ& empty()
    {
        static const champ empty_{
            node_t::make_inner_n(0),
            0,
        };
        return empty_;
    }

    // Takes ownership of one reference on `r`.
    champ(node_t* r, size_t sz)
        : root{r}
        , size{sz}
    {}

    champ(const champ& other)
        : champ{other.root, other.size}
    {
        inc();
    }

    champ(champ&& other)
        : champ{empty()}
    {
        swap(*this, other);
    }

    // Copy-and-swap keeps assignment exception safe.
    champ& operator=(const champ& other)
    {
        auto next = other;
        swap(*this, next);
        return *this;
    }

    champ& operator=(champ&& other)
    {
        swap(*this, other);
        return *this;
    }

    friend void swap(champ& x, champ& y)
    {
        using std::swap;
        swap(x.root, y.root);
        swap(x.size, y.size);
    }

    ~champ() { dec(); }

    void inc() const { root->inc(); }

    // Drops one reference, recursively destroying the tree when it was
    // the last one.
    void dec() const
    {
        if (root->dec())
            node_t::delete_deep(root, 0);
    }

    // Applies `fn` over every chunk of contiguous values in the tree,
    // in no particular key order.
    template <typename Fn>
    void for_each_chunk(Fn&& fn) const
    {
        for_each_chunk_traversal(root, 0, fn);
    }

    template <typename Fn>
    void for_each_chunk_traversal(node_t* node, count_t depth, Fn&& fn) const
    {
        if (depth < max_depth<B>) {
            auto datamap = node->datamap();
            if (datamap)
                fn(node->values(), node->values() + popcount(datamap));
            auto nodemap = node->nodemap();
            if (nodemap) {
                auto fst = node->children();
                auto lst = fst + popcount(nodemap);
                for (; fst != lst; ++fst)
                    for_each_chunk_traversal(*fst, depth + 1, fn);
            }
        } else {
            // Beyond max depth the node holds hash collisions only.
            fn(node->collisions(),
               node->collisions() + node->collision_count());
        }
    }

    // Looks up `k`, returning Project{}(value) when found and
    // Default{}() otherwise.  Each level consumes B bits of the hash
    // to select the slot; exhausted hashes fall into a collision node.
    template <typename Project, typename Default, typename K>
    decltype(auto) get(const K& k) const
    {
        auto node = root;
        auto hash = Hash{}(k);
        for (auto i = count_t{}; i < max_depth<B>; ++i) {
            auto bit = bitmap_t{1u} << (hash & mask<B>);
            if (node->nodemap() & bit) {
                // Slot holds a child: index it via the popcount of the
                // bits below it, then descend.
                auto offset = popcount(node->nodemap() & (bit - 1));
                node = node->children()[offset];
                hash = hash >> B;
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val = node->values() + offset;
                if (Equal{}(*val, k))
                    return Project{}(*val);
                else
                    return Default{}();
            } else {
                return Default{}();
            }
        }
        auto fst = node->collisions();
        auto lst = fst + node->collision_count();
        for (; fst != lst; ++fst)
            if (Equal{}(*fst, k))
                return Project{}(*fst);
        return Default{}();
    }

    // Returns the new subtree plus whether the size grew.  The
    // try/catch blocks release freshly built subtrees when copying a
    // parent node throws, keeping the operation exception safe.
    std::pair<node_t*, bool>
    do_add(node_t* node, T v, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (; fst != lst; ++fst)
                if (Equal{}(*fst, v))
                    return {
                        node_t::copy_collision_replace(node, fst, std::move(v)),
                        false};
            return {node_t::copy_collision_insert(node, std::move(v)), true};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result = do_add(
                    node->children()[offset], std::move(v), hash, shift + B);
                try {
                    result.first =
                        node_t::copy_inner_replace(node, offset, result.first);
                    return result;
                } catch (...) {
                    node_t::delete_deep_shift(result.first, shift + B);
                    throw;
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val = node->values() + offset;
                if (Equal{}(*val, v))
                    return {node_t::copy_inner_replace_value(
                                node, offset, std::move(v)),
                            false};
                else {
                    // Two different keys share this slot: push both
                    // down into a merged child node.
                    auto child = node_t::make_merged(
                        shift + B, std::move(v), hash, *val, Hash{}(*val));
                    try {
                        return {node_t::copy_inner_replace_merged(
                                    node, bit, offset, child),
                                true};
                    } catch (...) {
                        node_t::delete_deep_shift(child, shift + B);
                        throw;
                    }
                }
            } else {
                return {
                    node_t::copy_inner_insert_value(node, bit, std::move(v)),
                    true};
            }
        }
    }

    // Persistent insertion; replaces the value when the key exists.
    champ add(T v) const
    {
        auto hash = Hash{}(v);
        auto res = do_add(root, std::move(v), hash, 0);
        auto new_size = size + (res.second ? 1 : 0);
        return {res.first, new_size};
    }

    // Like do_add, but the stored value becomes
    // Combine{}(k, fn(Project{}(old))) when the key exists and
    // Combine{}(k, fn(Default{}())) otherwise.
    template <typename Project,
              typename Default,
              typename Combine,
              typename K,
              typename Fn>
    std::pair<node_t*, bool>
    do_update(node_t* node, K&& k, Fn&& fn, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (; fst != lst; ++fst)
                if (Equal{}(*fst, k))
                    return {
                        node_t::copy_collision_replace(
                            node,
                            fst,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Project{}(*fst)))),
                        false};
            return {node_t::copy_collision_insert(
                        node,
                        Combine{}(std::forward<K>(k),
                                  std::forward<Fn>(fn)(Default{}()))),
                    true};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result = do_update<Project, Default, Combine>(
                    node->children()[offset],
                    k,
                    std::forward<Fn>(fn),
                    hash,
                    shift + B);
                try {
                    result.first =
                        node_t::copy_inner_replace(node, offset, result.first);
                    return result;
                } catch (...) {
                    node_t::delete_deep_shift(result.first, shift + B);
                    throw;
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val = node->values() + offset;
                if (Equal{}(*val, k))
                    return {
                        node_t::copy_inner_replace_value(
                            node,
                            offset,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Project{}(*val)))),
                        false};
                else {
                    auto child = node_t::make_merged(
                        shift + B,
                        Combine{}(std::forward<K>(k),
                                  std::forward<Fn>(fn)(Default{}())),
                        hash,
                        *val,
                        Hash{}(*val));
                    try {
                        return {node_t::copy_inner_replace_merged(
                                    node, bit, offset, child),
                                true};
                    } catch (...) {
                        node_t::delete_deep_shift(child, shift + B);
                        throw;
                    }
                }
            } else {
                return {node_t::copy_inner_insert_value(
                            node,
                            bit,
                            Combine{}(std::forward<K>(k),
                                      std::forward<Fn>(fn)(Default{}()))),
                        true};
            }
        }
    }

    // Persistent upsert of the value associated with `k`.
    template <typename Project,
              typename Default,
              typename Combine,
              typename K,
              typename Fn>
    champ update(const K& k, Fn&& fn) const
    {
        auto hash = Hash{}(k);
        auto res = do_update<Project, Default, Combine>(
            root, k, std::forward<Fn>(fn), hash, 0);
        auto new_size = size + (res.second ? 1 : 0);
        return {res.first, new_size};
    }

    // basically:
    //      variant<monostate_t, T*, node_t*>
    // boo bad we are not using... C++17 :'(
    // (replace with std::variant once C++17 becomes a requirement)
    struct sub_result
    {
        enum kind_t
        {
            nothing,
            singleton,
            tree
        };

        union data_t
        {
            T* singleton;
            node_t* tree;
        };

        kind_t kind;
        data_t data;

        sub_result()
            : kind{nothing} {};
        sub_result(T* x)
            : kind{singleton}
        {
            data.singleton = x;
        };
        sub_result(node_t* x)
            : kind{tree}
        {
            data.tree = x;
        };
    };

    // Removes `k` from the subtree.  The result is either: nothing
    // (key absent), a singleton (the subtree collapsed to one value,
    // to be inlined by the parent), or a new tree node.
    template <typename K>
    sub_result
    do_sub(node_t* node, const K& k, hash_t hash, shift_t shift) const
    {
        if (shift == max_shift<B>) {
            auto fst = node->collisions();
            auto lst = fst + node->collision_count();
            for (auto cur = fst; cur != lst; ++cur)
                if (Equal{}(*cur, k))
                    // With only two collisions left, the survivor is
                    // handed up as a singleton to be inlined above.
                    return node->collision_count() > 2
                               ? node_t::copy_collision_remove(node, cur)
                               : sub_result{fst + (cur == fst)};
            return {};
        } else {
            auto idx = (hash & (mask<B> << shift)) >> shift;
            auto bit = bitmap_t{1u} << idx;
            if (node->nodemap() & bit) {
                auto offset = popcount(node->nodemap() & (bit - 1));
                auto result =
                    do_sub(node->children()[offset], k, hash, shift + B);
                switch (result.kind) {
                case sub_result::nothing:
                    return {};
                case sub_result::singleton:
                    // Keep propagating the singleton upwards while this
                    // node would contain nothing but that one child.
                    return node->datamap() == 0 &&
                                   popcount(node->nodemap()) == 1 && shift > 0
                               ? result
                               : node_t::copy_inner_replace_inline(
                                     node, bit, offset, *result.data.singleton);
                case sub_result::tree:
                    try {
                        return node_t::copy_inner_replace(
                            node, offset, result.data.tree);
                    } catch (...) {
                        node_t::delete_deep_shift(result.data.tree, shift + B);
                        throw;
                    }
                }
            } else if (node->datamap() & bit) {
                auto offset = popcount(node->datamap() & (bit - 1));
                auto val = node->values() + offset;
                if (Equal{}(*val, k)) {
                    auto nv = popcount(node->datamap());
                    if (node->nodemap() || nv > 2)
                        return node_t::copy_inner_remove_value(
                            node, bit, offset);
                    else if (nv == 2) {
                        // Two values and no children: below the root
                        // the sibling collapses to a singleton; at the
                        // root it stays inside a one-value node.
                        return shift > 0 ? sub_result{node->values() + !offset}
                                         : node_t::make_inner_n(
                                               0,
                                               node->datamap() & ~bit,
                                               node->values()[!offset]);
                    } else {
                        assert(shift == 0);
                        return empty().root->inc();
                    }
                }
            }
            return {};
        }
    }

    // Persistent removal of `k`; returns *this unchanged when absent.
    template <typename K>
    champ sub(const K& k) const
    {
        auto hash = Hash{}(k);
        auto res = do_sub(root, k, hash, 0);
        switch (res.kind) {
        case sub_result::nothing:
            return *this;
        case sub_result::tree:
            return {res.data.tree, size - 1};
        default:
            // do_sub never yields a bare singleton at the root: the
            // shift == 0 cases above collapse into a node instead.
            IMMER_UNREACHABLE;
        }
    }

    template <typename Eq = Equal>
    bool equals(const champ& other) const
    {
        return size == other.size && equals_tree<Eq>(root, other.root, 0);
    }

    // Structural equality: the CHAMP representation is canonical, so
    // equal containers have equal bitmaps and compare node by node.
    template <typename Eq>
    static bool equals_tree(const node_t* a, const node_t* b, count_t depth)
    {
        if (a == b)
            return true;
        else if (depth == max_depth<B>) {
            auto nv = a->collision_count();
            return nv == b->collision_count() &&
                   equals_collisions<Eq>(a->collisions(), b->collisions(), nv);
        } else {
            if (a->nodemap() != b->nodemap() || a->datamap() != b->datamap())
                return false;
            auto n = popcount(a->nodemap());
            for (auto i = count_t{}; i < n; ++i)
                if (!equals_tree<Eq>(
                        a->children()[i], b->children()[i], depth + 1))
                    return false;
            auto nv = popcount(a->datamap());
            return !nv || equals_values<Eq>(a->values(), b->values(), nv);
        }
    }

    template <typename Eq>
    static bool equals_values(const T* a, const T* b, count_t n)
    {
        return std::equal(a, a + n, b, Eq{});
    }

    // Collision nodes are unordered, so equality is a quadratic set
    // comparison (the counts are already known to match).
    template <typename Eq>
    static bool equals_collisions(const T* a, const T* b, count_t n)
    {
        auto ae = a + n;
        auto be = b + n;
        for (; a != ae; ++a) {
            for (auto fst = b; fst != be; ++fst)
                if (Eq{}(*a, *fst))
                    goto good;
            return false;
        good:
            continue;
        }
        return true;
    }
};
} // namespace hamts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,148 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/hamts/champ.hpp>
#include <immer/detail/iterator_facade.hpp>
namespace immer {
namespace detail {
namespace hamts {
/*!
 * Forward iterator over a champ.  It keeps a stack (`path_`) of
 * pointers into the children arrays from the root down to the node
 * whose values (or collisions) are currently being yielded through
 * the [cur_, end_) range.
 */
template <typename T, typename Hash, typename Eq, typename MP, bits_t B>
struct champ_iterator
    : iterator_facade<champ_iterator<T, Hash, Eq, MP, B>,
                      std::forward_iterator_tag,
                      T,
                      const T&,
                      std::ptrdiff_t,
                      const T*>
{
    using tree_t = champ<T, Hash, Eq, MP, B>;
    using node_t = typename tree_t::node_t;

    // Tag type to construct the end iterator.
    struct end_t
    {};

    champ_iterator() = default;

    // Begin iterator: start on the root's own values (if any), then
    // advance to the first non-empty chunk.
    champ_iterator(const tree_t& v)
        : depth_{0}
    {
        if (v.root->datamap()) {
            cur_ = v.root->values();
            end_ = v.root->values() + popcount(v.root->datamap());
        } else {
            cur_ = end_ = nullptr;
        }
        path_[0] = &v.root;
        ensure_valid_();
    }

    champ_iterator(const tree_t& v, end_t)
        : cur_{nullptr}
        , end_{nullptr}
        , depth_{0}
    {
        path_[0] = &v.root;
    }

    champ_iterator(const champ_iterator& other)
        : cur_{other.cur_}
        , end_{other.end_}
        , depth_{other.depth_}
    {
        // Only the first depth_ + 1 path entries are meaningful.
        std::copy(other.path_, other.path_ + depth_ + 1, path_);
    }

private:
    friend iterator_core_access;

    T* cur_; // current value within the current chunk
    T* end_; // one past the last value of the current chunk
    count_t depth_;
    // path_[i] points into the children array at depth i (path_[0]
    // points at the root pointer itself).
    node_t* const* path_[max_depth<B> + 1];

    void increment()
    {
        ++cur_;
        ensure_valid_();
    }

    // Descends into the first child of the current node, if any,
    // updating [cur_, end_).  Returns whether a step was taken.
    bool step_down()
    {
        if (depth_ < max_depth<B>) {
            auto parent = *path_[depth_];
            if (parent->nodemap()) {
                ++depth_;
                path_[depth_] = parent->children();
                auto child = *path_[depth_];
                if (depth_ < max_depth<B>) {
                    if (child->datamap()) {
                        cur_ = child->values();
                        end_ = cur_ + popcount(child->datamap());
                    }
                } else {
                    // Deepest level: the child is a collision node.
                    cur_ = child->collisions();
                    end_ = cur_ + child->collision_count();
                }
                return true;
            }
        }
        return false;
    }

    // Moves to the next sibling, climbing up while the current node is
    // its parent's last child.  Returns whether a step was taken.
    bool step_right()
    {
        while (depth_ > 0) {
            auto parent = *path_[depth_ - 1];
            auto last = parent->children() + popcount(parent->nodemap());
            auto next = path_[depth_] + 1;
            if (next < last) {
                path_[depth_] = next;
                auto child = *path_[depth_];
                if (depth_ < max_depth<B>) {
                    if (child->datamap()) {
                        cur_ = child->values();
                        end_ = cur_ + popcount(child->datamap());
                    }
                } else {
                    cur_ = child->collisions();
                    end_ = cur_ + child->collision_count();
                }
                return true;
            }
            --depth_;
        }
        return false;
    }

    // Advances through the tree until [cur_, end_) is a non-empty
    // chunk, or turns *this into the end iterator.
    void ensure_valid_()
    {
        while (cur_ == end_) {
            while (step_down())
                if (cur_ != end_)
                    return;
            if (!step_right()) {
                // end of sequence
                assert(depth_ == 0);
                cur_ = end_ = nullptr;
                return;
            }
        }
    }

    bool equal(const champ_iterator& other) const { return cur_ == other.cur_; }

    const T& dereference() const { return *cur_; }
};
} // namespace hamts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,717 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/combine_standard_layout.hpp>
#include <immer/detail/hamts/bits.hpp>
#include <immer/detail/util.hpp>
#include <cassert>
namespace immer {
namespace detail {
namespace hamts {
// A node of a CHAMP (Compressed Hash-Array Mapped Prefix) trie.  A node
// is either an *inner* node -- with a bitmap-compressed array of child
// pointers (`nodemap`) and a separately allocated, refcounted array of
// inline values (`datamap`) -- or a *collision* node holding all values
// that share a full hash.  All mutation is done by building fresh nodes;
// the `copy_*` helpers below implement those persistent updates with
// strong exception safety (partially-constructed buffers are unwound
// before deallocating).
template <typename T,
          typename Hash,
          typename Equal,
          typename MemoryPolicy,
          bits_t B>
struct node
{
    using node_t = node;
    using memory      = MemoryPolicy;
    using heap_policy = typename memory::heap;
    using heap        = typename heap_policy::type;
    using transience  = typename memory::transience_t;
    using refs_t      = typename memory::refcount;
    using ownee_t     = typename transience::ownee;
    using edit_t      = typename transience::edit;
    using value_t = T;
    using bitmap_t = typename get_bitmap_type<B>::type;
    enum class kind_t
    {
        collision,
        inner
    };
    // Flexible-array style payload of a collision node.
    struct collision_t
    {
        count_t count;
        aligned_storage_for<T> buffer;
    };
    // Flexible-array style payload of the refcounted values block.
    struct values_data_t
    {
        aligned_storage_for<T> buffer;
    };
    // Values block carries its own refcount so it can be shared between
    // nodes that differ only in their children.
    using values_t = combine_standard_layout_t<values_data_t, refs_t>;
    struct inner_t
    {
        bitmap_t nodemap;   // bit set => slot holds a child node
        bitmap_t datamap;   // bit set => slot holds an inline value
        values_t* values;   // shared, refcounted array of inline values
        aligned_storage_for<node_t*> buffer; // start of child pointer array
    };
    union data_t
    {
        inner_t inner;
        collision_t collision;
    };
    struct impl_data_t
    {
#if IMMER_TAGGED_NODE
        kind_t kind;
#endif
        data_t data;
    };
    using impl_t = combine_standard_layout_t<impl_data_t, refs_t>;
    impl_t impl;
    // Byte sizes of the variable-length allocations for each node kind.
    constexpr static std::size_t sizeof_values_n(count_t count)
    {
        return std::max(sizeof(values_t),
                        immer_offsetof(values_t, d.buffer) +
                            sizeof(values_data_t::buffer) * count);
    }
    constexpr static std::size_t sizeof_collision_n(count_t count)
    {
        return immer_offsetof(impl_t, d.data.collision.buffer) +
               sizeof(collision_t::buffer) * count;
    }
    constexpr static std::size_t sizeof_inner_n(count_t count)
    {
        return immer_offsetof(impl_t, d.data.inner.buffer) +
               sizeof(inner_t::buffer) * count;
    }
#if IMMER_TAGGED_NODE
    kind_t kind() const { return impl.d.kind; }
#endif
    // Accessors; each asserts the node kind when tagged nodes are on.
    auto values()
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        assert(impl.d.data.inner.values);
        return (T*) &impl.d.data.inner.values->d.buffer;
    }
    auto values() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        assert(impl.d.data.inner.values);
        return (const T*) &impl.d.data.inner.values->d.buffer;
    }
    auto children()
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        return (node_t**) &impl.d.data.inner.buffer;
    }
    auto children() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        return (const node_t* const*) &impl.d.data.inner.buffer;
    }
    auto datamap() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        return impl.d.data.inner.datamap;
    }
    auto nodemap() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
        return impl.d.data.inner.nodemap;
    }
    auto collision_count() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
        return impl.d.data.collision.count;
    }
    T* collisions()
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
        return (T*) &impl.d.data.collision.buffer;
    }
    const T* collisions() const
    {
        IMMER_ASSERT_TAGGED(kind() == kind_t::collision);
        return (const T*) &impl.d.data.collision.buffer;
    }
    // Refcount / transience-ownership accessors for values blocks and
    // for nodes themselves.
    static refs_t& refs(const values_t* x)
    {
        return auto_const_cast(get<refs_t>(*x));
    }
    static const ownee_t& ownee(const values_t* x) { return get<ownee_t>(*x); }
    static ownee_t& ownee(values_t* x) { return get<ownee_t>(*x); }
    static refs_t& refs(const node_t* x)
    {
        return auto_const_cast(get<refs_t>(x->impl));
    }
    static const ownee_t& ownee(const node_t* x)
    {
        return get<ownee_t>(x->impl);
    }
    static ownee_t& ownee(node_t* x) { return get<ownee_t>(x->impl); }
    // Allocate an inner node with room for `n` children and no values.
    static node_t* make_inner_n(count_t n)
    {
        assert(n <= branches<B>);
        auto m = heap::allocate(sizeof_inner_n(n));
        auto p = new (m) node_t;
#if IMMER_TAGGED_NODE
        p->impl.d.kind = node_t::kind_t::inner;
#endif
        p->impl.d.data.inner.nodemap = 0;
        p->impl.d.data.inner.datamap = 0;
        p->impl.d.data.inner.values  = nullptr;
        return p;
    }
    // Allocate an inner node sharing an existing values block.
    static node_t* make_inner_n(count_t n, values_t* values)
    {
        auto p = make_inner_n(n);
        if (values) {
            p->impl.d.data.inner.values = values;
            refs(values).inc();
        }
        return p;
    }
    // Allocate an inner node with a fresh values block of `nv` slots.
    static node_t* make_inner_n(count_t n, count_t nv)
    {
        assert(nv <= branches<B>);
        auto p = make_inner_n(n);
        if (nv) {
            try {
                p->impl.d.data.inner.values =
                    new (heap::allocate(sizeof_values_n(nv))) values_t{};
            } catch (...) {
                deallocate_inner(p, n);
                throw;
            }
        }
        return p;
    }
    // Inner node with a single child at sparse index `idx`.
    static node_t* make_inner_n(count_t n, count_t idx, node_t* child)
    {
        assert(n >= 1);
        auto p                       = make_inner_n(n);
        p->impl.d.data.inner.nodemap = bitmap_t{1u} << idx;
        p->children()[0]             = child;
        return p;
    }
    // Inner node with a single inline value under the given bitmap.
    static node_t* make_inner_n(count_t n, bitmap_t bitmap, T x)
    {
        auto p                       = make_inner_n(n, 1);
        p->impl.d.data.inner.datamap = bitmap;
        try {
            new (p->values()) T{std::move(x)};
        } catch (...) {
            deallocate_inner(p, n, 1);
            throw;
        }
        return p;
    }
    // Inner node with two inline values; they are stored in sparse-index
    // order regardless of argument order.
    static node_t*
    make_inner_n(count_t n, count_t idx1, T x1, count_t idx2, T x2)
    {
        assert(idx1 != idx2);
        auto p = make_inner_n(n, 2);
        p->impl.d.data.inner.datamap =
            (bitmap_t{1u} << idx1) | (bitmap_t{1u} << idx2);
        auto assign = [&](auto&& x1, auto&& x2) {
            auto vp = p->values();
            try {
                new (vp) T{std::move(x1)};
                try {
                    new (vp + 1) T{std::move(x2)};
                } catch (...) {
                    vp->~T();
                    throw;
                }
            } catch (...) {
                deallocate_inner(p, n, 2);
                throw;
            }
        };
        if (idx1 < idx2)
            assign(x1, x2);
        else
            assign(x2, x1);
        return p;
    }
    // Allocate an empty collision node with room for `n` values.
    static node_t* make_collision_n(count_t n)
    {
        auto m = heap::allocate(sizeof_collision_n(n));
        auto p = new (m) node_t;
#if IMMER_TAGGED_NODE
        p->impl.d.kind = node_t::kind_t::collision;
#endif
        p->impl.d.data.collision.count = n;
        return p;
    }
    // Collision node holding exactly two values.
    static node_t* make_collision(T v1, T v2)
    {
        auto m = heap::allocate(sizeof_collision_n(2));
        auto p = new (m) node_t;
#if IMMER_TAGGED_NODE
        p->impl.d.kind = node_t::kind_t::collision;
#endif
        p->impl.d.data.collision.count = 2;
        auto cols                      = p->collisions();
        try {
            new (cols) T{std::move(v1)};
            try {
                new (cols + 1) T{std::move(v2)};
            } catch (...) {
                cols->~T();
                throw;
            }
        } catch (...) {
            deallocate_collision(p, 2);
            throw;
        }
        return p;
    }
    // New collision node = `v` prepended to src's values.
    static node_t* copy_collision_insert(node_t* src, T v)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
        auto n    = src->collision_count();
        auto dst  = make_collision_n(n + 1);
        auto srcp = src->collisions();
        auto dstp = dst->collisions();
        try {
            new (dstp) T{std::move(v)};
            try {
                std::uninitialized_copy(srcp, srcp + n, dstp + 1);
            } catch (...) {
                dstp->~T();
                throw;
            }
        } catch (...) {
            deallocate_collision(dst, n + 1);
            throw;
        }
        return dst;
    }
    // New collision node = src's values minus the one at `v`.
    static node_t* copy_collision_remove(node_t* src, T* v)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
        assert(src->collision_count() > 1);
        auto n    = src->collision_count();
        auto dst  = make_collision_n(n - 1);
        auto srcp = src->collisions();
        auto dstp = dst->collisions();
        try {
            dstp = std::uninitialized_copy(srcp, v, dstp);
            try {
                std::uninitialized_copy(v + 1, srcp + n, dstp);
            } catch (...) {
                destroy(dst->collisions(), dstp);
                throw;
            }
        } catch (...) {
            deallocate_collision(dst, n - 1);
            throw;
        }
        return dst;
    }
    // New collision node = src with the value at `pos` replaced by `v`
    // (the replacement ends up first in the new node).
    static node_t* copy_collision_replace(node_t* src, T* pos, T v)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::collision);
        auto n    = src->collision_count();
        auto dst  = make_collision_n(n);
        auto srcp = src->collisions();
        auto dstp = dst->collisions();
        assert(pos >= srcp && pos < srcp + n);
        try {
            new (dstp) T{std::move(v)};
            try {
                dstp = std::uninitialized_copy(srcp, pos, dstp + 1);
                try {
                    std::uninitialized_copy(pos + 1, srcp + n, dstp);
                } catch (...) {
                    destroy(dst->collisions(), dstp);
                    throw;
                }
            } catch (...) {
                dst->collisions()->~T();
                throw;
            }
        } catch (...) {
            deallocate_collision(dst, n);
            throw;
        }
        return dst;
    }
    // New inner node = src with the child at `offset` replaced; the
    // values block is shared, children refcounts are adjusted.
    static node_t*
    copy_inner_replace(node_t* src, count_t offset, node_t* child)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        auto n    = popcount(src->nodemap());
        auto dst  = make_inner_n(n, src->impl.d.data.inner.values);
        auto srcp = src->children();
        auto dstp = dst->children();
        dst->impl.d.data.inner.datamap = src->datamap();
        dst->impl.d.data.inner.nodemap = src->nodemap();
        std::uninitialized_copy(srcp, srcp + n, dstp);
        inc_nodes(srcp, n);
        srcp[offset]->dec_unsafe();
        dstp[offset] = child;
        return dst;
    }
    // New inner node = src with the inline value at `offset` replaced.
    static node_t* copy_inner_replace_value(node_t* src, count_t offset, T v)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        assert(offset < popcount(src->datamap()));
        auto n                         = popcount(src->nodemap());
        auto nv                        = popcount(src->datamap());
        auto dst                       = make_inner_n(n, nv);
        dst->impl.d.data.inner.datamap = src->datamap();
        dst->impl.d.data.inner.nodemap = src->nodemap();
        try {
            std::uninitialized_copy(
                src->values(), src->values() + nv, dst->values());
            try {
                dst->values()[offset] = std::move(v);
            } catch (...) {
                destroy_n(dst->values(), nv);
                throw;
            }
        } catch (...) {
            deallocate_inner(dst, n, nv);
            throw;
        }
        inc_nodes(src->children(), n);
        std::uninitialized_copy(
            src->children(), src->children() + n, dst->children());
        return dst;
    }
    // New inner node = src with the inline value under `bit` demoted
    // into the subtree `node` (value removed, child added).
    static node_t* copy_inner_replace_merged(node_t* src,
                                             bitmap_t bit,
                                             count_t voffset,
                                             node_t* node)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        assert(!(src->nodemap() & bit));
        assert(src->datamap() & bit);
        assert(voffset == popcount(src->datamap() & (bit - 1)));
        auto n       = popcount(src->nodemap());
        auto nv      = popcount(src->datamap());
        auto dst     = make_inner_n(n + 1, nv - 1);
        auto noffset = popcount(src->nodemap() & (bit - 1));
        dst->impl.d.data.inner.datamap = src->datamap() & ~bit;
        dst->impl.d.data.inner.nodemap = src->nodemap() | bit;
        if (nv > 1) {
            try {
                std::uninitialized_copy(
                    src->values(), src->values() + voffset, dst->values());
                try {
                    std::uninitialized_copy(src->values() + voffset + 1,
                                            src->values() + nv,
                                            dst->values() + voffset);
                } catch (...) {
                    destroy_n(dst->values(), voffset);
                    throw;
                }
            } catch (...) {
                deallocate_inner(dst, n + 1, nv - 1);
                throw;
            }
        }
        inc_nodes(src->children(), n);
        std::uninitialized_copy(
            src->children(), src->children() + noffset, dst->children());
        std::uninitialized_copy(src->children() + noffset,
                                src->children() + n,
                                dst->children() + noffset + 1);
        dst->children()[noffset] = node;
        return dst;
    }
    // Inverse of copy_inner_replace_merged: the child under `bit` is
    // replaced by the single inline value `value`.
    static node_t* copy_inner_replace_inline(node_t* src,
                                             bitmap_t bit,
                                             count_t noffset,
                                             T value)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        assert(!(src->datamap() & bit));
        assert(src->nodemap() & bit);
        assert(noffset == popcount(src->nodemap() & (bit - 1)));
        auto n       = popcount(src->nodemap());
        auto nv      = popcount(src->datamap());
        auto dst     = make_inner_n(n - 1, nv + 1);
        auto voffset = popcount(src->datamap() & (bit - 1));
        dst->impl.d.data.inner.nodemap = src->nodemap() & ~bit;
        dst->impl.d.data.inner.datamap = src->datamap() | bit;
        try {
            if (nv)
                std::uninitialized_copy(
                    src->values(), src->values() + voffset, dst->values());
            try {
                new (dst->values() + voffset) T{std::move(value)};
                try {
                    if (nv)
                        std::uninitialized_copy(src->values() + voffset,
                                                src->values() + nv,
                                                dst->values() + voffset + 1);
                } catch (...) {
                    dst->values()[voffset].~T();
                    throw;
                }
            } catch (...) {
                destroy_n(dst->values(), voffset);
                throw;
            }
        } catch (...) {
            deallocate_inner(dst, n - 1, nv + 1);
            throw;
        }
        inc_nodes(src->children(), n);
        src->children()[noffset]->dec_unsafe();
        std::uninitialized_copy(
            src->children(), src->children() + noffset, dst->children());
        std::uninitialized_copy(src->children() + noffset + 1,
                                src->children() + n,
                                dst->children() + noffset);
        return dst;
    }
    // New inner node = src minus the inline value under `bit`.
    static node_t*
    copy_inner_remove_value(node_t* src, bitmap_t bit, count_t voffset)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        assert(!(src->nodemap() & bit));
        assert(src->datamap() & bit);
        assert(voffset == popcount(src->datamap() & (bit - 1)));
        auto n                         = popcount(src->nodemap());
        auto nv                        = popcount(src->datamap());
        auto dst                       = make_inner_n(n, nv - 1);
        dst->impl.d.data.inner.datamap = src->datamap() & ~bit;
        dst->impl.d.data.inner.nodemap = src->nodemap();
        if (nv > 1) {
            try {
                std::uninitialized_copy(
                    src->values(), src->values() + voffset, dst->values());
                try {
                    std::uninitialized_copy(src->values() + voffset + 1,
                                            src->values() + nv,
                                            dst->values() + voffset);
                } catch (...) {
                    destroy_n(dst->values(), voffset);
                    throw;
                }
            } catch (...) {
                deallocate_inner(dst, n, nv - 1);
                throw;
            }
        }
        inc_nodes(src->children(), n);
        std::uninitialized_copy(
            src->children(), src->children() + n, dst->children());
        return dst;
    }
    // New inner node = src plus a new inline value under `bit`.
    static node_t* copy_inner_insert_value(node_t* src, bitmap_t bit, T v)
    {
        IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
        auto n      = popcount(src->nodemap());
        auto nv     = popcount(src->datamap());
        auto offset = popcount(src->datamap() & (bit - 1));
        auto dst    = make_inner_n(n, nv + 1);
        dst->impl.d.data.inner.datamap = src->datamap() | bit;
        dst->impl.d.data.inner.nodemap = src->nodemap();
        try {
            if (nv)
                std::uninitialized_copy(
                    src->values(), src->values() + offset, dst->values());
            try {
                new (dst->values() + offset) T{std::move(v)};
                try {
                    if (nv)
                        std::uninitialized_copy(src->values() + offset,
                                                src->values() + nv,
                                                dst->values() + offset + 1);
                } catch (...) {
                    dst->values()[offset].~T();
                    throw;
                }
            } catch (...) {
                destroy_n(dst->values(), offset);
                throw;
            }
        } catch (...) {
            deallocate_inner(dst, n, nv + 1);
            throw;
        }
        inc_nodes(src->children(), n);
        std::uninitialized_copy(
            src->children(), src->children() + n, dst->children());
        return dst;
    }
    // Build the minimal subtree distinguishing two values whose hashes
    // agree on all bits below `shift`; recurses deeper while their
    // partial hashes still collide, ending in a collision node if they
    // collide completely.
    static node_t*
    make_merged(shift_t shift, T v1, hash_t hash1, T v2, hash_t hash2)
    {
        if (shift < max_shift<B>) {
            auto idx1 = hash1 & (mask<B> << shift);
            auto idx2 = hash2 & (mask<B> << shift);
            if (idx1 == idx2) {
                auto merged = make_merged(
                    shift + B, std::move(v1), hash1, std::move(v2), hash2);
                try {
                    return make_inner_n(1, idx1 >> shift, merged);
                } catch (...) {
                    delete_deep_shift(merged, shift + B);
                    throw;
                }
            } else {
                return make_inner_n(0,
                                    idx1 >> shift,
                                    std::move(v1),
                                    idx2 >> shift,
                                    std::move(v2));
            }
        } else {
            return make_collision(std::move(v1), std::move(v2));
        }
    }
    // Reference counting helpers.
    node_t* inc()
    {
        refs(this).inc();
        return this;
    }
    const node_t* inc() const
    {
        refs(this).inc();
        return this;
    }
    bool dec() const { return refs(this).dec(); }
    void dec_unsafe() const { refs(this).dec_unsafe(); }
    static void inc_nodes(node_t** p, count_t n)
    {
        for (auto i = p, e = i + n; i != e; ++i)
            refs(*i).inc();
    }
    // Destruction helpers.  `delete_*` destroy one node's owned data;
    // `delete_deep*` recurse over children whose refcount drops to zero.
    static void delete_values(values_t* p, count_t n)
    {
        assert(p);
        deallocate_values(p, n);
    }
    static void delete_inner(node_t* p)
    {
        assert(p);
        IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
        auto vp = p->impl.d.data.inner.values;
        if (vp && refs(vp).dec())
            delete_values(vp, popcount(p->datamap()));
        deallocate_inner(p, popcount(p->nodemap()));
    }
    static void delete_collision(node_t* p)
    {
        assert(p);
        IMMER_ASSERT_TAGGED(p->kind() == kind_t::collision);
        auto n = p->collision_count();
        deallocate_collision(p, n);
    }
    // Recursive delete parameterized by *depth* (s counts levels).
    static void delete_deep(node_t* p, shift_t s)
    {
        if (s == max_depth<B>)
            delete_collision(p);
        else {
            auto fst = p->children();
            auto lst = fst + popcount(p->nodemap());
            for (; fst != lst; ++fst)
                if ((*fst)->dec())
                    delete_deep(*fst, s + 1);
            delete_inner(p);
        }
    }
    // Recursive delete parameterized by *shift* (s counts hash bits).
    static void delete_deep_shift(node_t* p, shift_t s)
    {
        if (s == max_shift<B>)
            delete_collision(p);
        else {
            auto fst = p->children();
            auto lst = fst + popcount(p->nodemap());
            for (; fst != lst; ++fst)
                if ((*fst)->dec())
                    delete_deep_shift(*fst, s + B);
            delete_inner(p);
        }
    }
    static void deallocate_values(values_t* p, count_t n)
    {
        destroy_n((T*) &p->d.buffer, n);
        heap::deallocate(node_t::sizeof_values_n(n), p);
    }
    static void deallocate_collision(node_t* p, count_t n)
    {
        destroy_n(p->collisions(), n);
        heap::deallocate(node_t::sizeof_collision_n(n), p);
    }
    static void deallocate_inner(node_t* p, count_t n)
    {
        heap::deallocate(node_t::sizeof_inner_n(n), p);
    }
    // Overload also releasing the (unshared) values block.
    static void deallocate_inner(node_t* p, count_t n, count_t nv)
    {
        assert(nv);
        heap::deallocate(node_t::sizeof_values_n(nv),
                         p->impl.d.data.inner.values);
        heap::deallocate(node_t::sizeof_inner_n(n), p);
    }
};
} // namespace hamts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,212 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cstddef>
#include <iterator>
#include <type_traits>
namespace immer {
namespace detail {
/*!
 * Access gateway used by `iterator_facade`: iterator implementations
 * befriend this struct so they may keep their primitive operations
 * (`dereference`, `increment`, `decrement`, `equal`, `advance`,
 * `distance_to`) private while the facade still reaches them through
 * these static forwarders.
 */
struct iterator_core_access
{
    template <typename Iter>
    static decltype(auto) dereference(Iter&& it)
    {
        return it.dereference();
    }

    template <typename Iter>
    static decltype(auto) increment(Iter&& it)
    {
        return it.increment();
    }

    template <typename Iter>
    static decltype(auto) decrement(Iter&& it)
    {
        return it.decrement();
    }

    template <typename IterA, typename IterB>
    static decltype(auto) equal(IterA&& lhs, IterB&& rhs)
    {
        return lhs.equal(rhs);
    }

    template <typename Iter, typename Dist>
    static decltype(auto) advance(Iter&& it, Dist offset)
    {
        return it.advance(offset);
    }

    template <typename IterA, typename IterB>
    static decltype(auto) distance_to(IterA&& lhs, IterB&& rhs)
    {
        return lhs.distance_to(rhs);
    }
};
/*!
* Minimalistic reimplementation of boost::iterator_facade
*/
// CRTP base that synthesizes the full iterator operator set from a small
// core of primitives on DerivedT (dereference, increment, equal, and --
// for stronger categories -- decrement, advance, distance_to), reached
// through iterator_core_access.  Operators beyond what the declared
// category supports are guarded by static_asserts.
template <typename DerivedT,
          typename IteratorCategoryT,
          typename T,
          typename ReferenceT      = T&,
          typename DifferenceTypeT = std::ptrdiff_t,
          typename PointerT        = T*>
class iterator_facade
{
public:
    using iterator_category = IteratorCategoryT;
    using value_type        = T;
    using difference_type   = DifferenceTypeT;
    using pointer           = PointerT;
    using reference         = ReferenceT;
protected:
    using access_t = iterator_core_access;
    constexpr static auto is_random_access =
        std::is_base_of<std::random_access_iterator_tag,
                        IteratorCategoryT>::value;
    constexpr static auto is_bidirectional =
        std::is_base_of<std::bidirectional_iterator_tag,
                        IteratorCategoryT>::value;
    // Result of operator[]: holds an advanced copy of the iterator and
    // converts to the reference type on demand.
    class reference_proxy
    {
        friend iterator_facade;
        DerivedT iter_;
        reference_proxy(DerivedT iter)
            : iter_{std::move(iter)}
        {}
    public:
        operator ReferenceT() const { return *iter_; }
    };
    const DerivedT& derived() const
    {
        static_assert(std::is_base_of<iterator_facade, DerivedT>::value,
                      "must pass a derived thing");
        return *static_cast<const DerivedT*>(this);
    }
    DerivedT& derived()
    {
        static_assert(std::is_base_of<iterator_facade, DerivedT>::value,
                      "must pass a derived thing");
        return *static_cast<DerivedT*>(this);
    }
public:
    ReferenceT operator*() const { return access_t::dereference(derived()); }
    PointerT operator->() const { return &access_t::dereference(derived()); }
    reference_proxy operator[](DifferenceTypeT n) const
    {
        static_assert(is_random_access, "");
        return derived() + n;
    }
    bool operator==(const DerivedT& rhs) const
    {
        return access_t::equal(derived(), rhs);
    }
    bool operator!=(const DerivedT& rhs) const
    {
        return !access_t::equal(derived(), rhs);
    }
    DerivedT& operator++()
    {
        access_t::increment(derived());
        return derived();
    }
    DerivedT operator++(int)
    {
        auto tmp = derived();
        access_t::increment(derived());
        return tmp;
    }
    DerivedT& operator--()
    {
        static_assert(is_bidirectional || is_random_access, "");
        access_t::decrement(derived());
        return derived();
    }
    DerivedT operator--(int)
    {
        static_assert(is_bidirectional || is_random_access, "");
        auto tmp = derived();
        access_t::decrement(derived());
        return tmp;
    }
    DerivedT& operator+=(DifferenceTypeT n)
    {
        access_t::advance(derived(), n);
        return derived();
    }
    DerivedT& operator-=(DifferenceTypeT n)
    {
        access_t::advance(derived(), -n);
        return derived();
    }
    DerivedT operator+(DifferenceTypeT n) const
    {
        static_assert(is_random_access, "");
        auto tmp = derived();
        return tmp += n;
    }
    friend DerivedT operator+(DifferenceTypeT n, const DerivedT& i)
    {
        static_assert(is_random_access, "");
        return i + n;
    }
    DerivedT operator-(DifferenceTypeT n) const
    {
        static_assert(is_random_access, "");
        auto tmp = derived();
        return tmp -= n;
    }
    // Iterator difference: distance from rhs to *this.
    DifferenceTypeT operator-(const DerivedT& rhs) const
    {
        static_assert(is_random_access, "");
        return access_t::distance_to(rhs, derived());
    }
    // Orderings are derived from distance_to's sign.
    bool operator<(const DerivedT& rhs) const
    {
        static_assert(is_random_access, "");
        return access_t::distance_to(derived(), rhs) > 0;
    }
    bool operator<=(const DerivedT& rhs) const
    {
        static_assert(is_random_access, "");
        return access_t::distance_to(derived(), rhs) >= 0;
    }
    bool operator>(const DerivedT& rhs) const
    {
        static_assert(is_random_access, "");
        return access_t::distance_to(derived(), rhs) < 0;
    }
    bool operator>=(const DerivedT& rhs) const
    {
        static_assert(is_random_access, "");
        return access_t::distance_to(derived(), rhs) <= 0;
    }
};
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,33 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cstdint>
namespace immer {
namespace detail {
namespace rbts {
// Basic integer types for the radix-balanced-tree implementation.
using bits_t = std::uint32_t;  // number of bits per tree level
using shift_t = std::uint32_t; // bit shift corresponding to a level
using count_t = std::uint32_t; // number of children/elements in a node
using size_t = std::size_t;    // total element counts and sizes
// Number of branches per node for a branching factor of B bits.
template <bits_t B, typename T = count_t>
constexpr T branches = T{1} << B;
// Bit mask selecting the B index bits of one level.
template <bits_t B, typename T = size_t>
constexpr T mask = branches<B, T> - 1;
// Shift at which leaf nodes (BL bits wide) are reached.
template <bits_t B, bits_t BL>
constexpr shift_t endshift = shift_t{BL} - shift_t{B};
} // namespace rbts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,932 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/combine_standard_layout.hpp>
#include <immer/detail/rbts/bits.hpp>
#include <immer/detail/util.hpp>
#include <immer/heap/tags.hpp>
#include <cassert>
#include <cstddef>
#include <memory>
#include <type_traits>
namespace immer {
namespace detail {
namespace rbts {
template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
struct node
{
static constexpr auto bits = B;
static constexpr auto bits_leaf = BL;
using node_t = node;
using memory = MemoryPolicy;
using heap_policy = typename memory::heap;
using transience = typename memory::transience_t;
using refs_t = typename memory::refcount;
using ownee_t = typename transience::ownee;
using edit_t = typename transience::edit;
using value_t = T;
static constexpr bool embed_relaxed = memory::prefer_fewer_bigger_objects;
enum class kind_t
{
leaf,
inner
};
struct relaxed_data_t
{
count_t count;
size_t sizes[branches<B>];
};
using relaxed_data_with_meta_t =
combine_standard_layout_t<relaxed_data_t, refs_t, ownee_t>;
using relaxed_data_no_meta_t = combine_standard_layout_t<relaxed_data_t>;
using relaxed_t = std::conditional_t<embed_relaxed,
relaxed_data_no_meta_t,
relaxed_data_with_meta_t>;
struct leaf_t
{
aligned_storage_for<T> buffer;
};
struct inner_t
{
relaxed_t* relaxed;
aligned_storage_for<node_t*> buffer;
};
union data_t
{
inner_t inner;
leaf_t leaf;
};
struct impl_data_t
{
#if IMMER_TAGGED_NODE
kind_t kind;
#endif
data_t data;
};
using impl_t = combine_standard_layout_t<impl_data_t, refs_t, ownee_t>;
impl_t impl;
// assume that we need to keep headroom space in the node when we
// are doing reference counting, since any node may become
// transient when it has only one reference
constexpr static bool keep_headroom = !std::is_empty<refs_t>{};
constexpr static std::size_t sizeof_packed_leaf_n(count_t count)
{
return immer_offsetof(impl_t, d.data.leaf.buffer) +
sizeof(leaf_t::buffer) * count;
}
constexpr static std::size_t sizeof_packed_inner_n(count_t count)
{
return immer_offsetof(impl_t, d.data.inner.buffer) +
sizeof(inner_t::buffer) * count;
}
constexpr static std::size_t sizeof_packed_relaxed_n(count_t count)
{
return immer_offsetof(relaxed_t, d.sizes) + sizeof(size_t) * count;
}
constexpr static std::size_t sizeof_packed_inner_r_n(count_t count)
{
return embed_relaxed ? sizeof_packed_inner_n(count) +
sizeof_packed_relaxed_n(count)
: sizeof_packed_inner_n(count);
}
constexpr static std::size_t max_sizeof_leaf =
sizeof_packed_leaf_n(branches<BL>);
constexpr static std::size_t max_sizeof_inner =
sizeof_packed_inner_n(branches<B>);
constexpr static std::size_t max_sizeof_relaxed =
sizeof_packed_relaxed_n(branches<B>);
constexpr static std::size_t max_sizeof_inner_r =
sizeof_packed_inner_r_n(branches<B>);
constexpr static std::size_t sizeof_inner_n(count_t n)
{
return keep_headroom ? max_sizeof_inner : sizeof_packed_inner_n(n);
}
constexpr static std::size_t sizeof_inner_r_n(count_t n)
{
return keep_headroom ? max_sizeof_inner_r : sizeof_packed_inner_r_n(n);
}
constexpr static std::size_t sizeof_relaxed_n(count_t n)
{
return keep_headroom ? max_sizeof_relaxed : sizeof_packed_relaxed_n(n);
}
constexpr static std::size_t sizeof_leaf_n(count_t n)
{
return keep_headroom ? max_sizeof_leaf : sizeof_packed_leaf_n(n);
}
using heap =
typename heap_policy::template optimized<max_sizeof_inner>::type;
#if IMMER_TAGGED_NODE
kind_t kind() const { return impl.d.kind; }
#endif
relaxed_t* relaxed()
{
IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
return impl.d.data.inner.relaxed;
}
const relaxed_t* relaxed() const
{
IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
return impl.d.data.inner.relaxed;
}
node_t** inner()
{
IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
return reinterpret_cast<node_t**>(&impl.d.data.inner.buffer);
}
T* leaf()
{
IMMER_ASSERT_TAGGED(kind() == kind_t::leaf);
return reinterpret_cast<T*>(&impl.d.data.leaf.buffer);
}
static refs_t& refs(const relaxed_t* x)
{
return auto_const_cast(get<refs_t>(*x));
}
static const ownee_t& ownee(const relaxed_t* x) { return get<ownee_t>(*x); }
static ownee_t& ownee(relaxed_t* x) { return get<ownee_t>(*x); }
static refs_t& refs(const node_t* x)
{
return auto_const_cast(get<refs_t>(x->impl));
}
static const ownee_t& ownee(const node_t* x)
{
return get<ownee_t>(x->impl);
}
static ownee_t& ownee(node_t* x) { return get<ownee_t>(x->impl); }
static node_t* make_inner_n(count_t n)
{
assert(n <= branches<B>);
auto m = heap::allocate(sizeof_inner_n(n));
auto p = new (m) node_t;
p->impl.d.data.inner.relaxed = nullptr;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
}
static node_t* make_inner_e(edit_t e)
{
auto m = heap::allocate(max_sizeof_inner);
auto p = new (m) node_t;
ownee(p) = e;
p->impl.d.data.inner.relaxed = nullptr;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
}
static node_t* make_inner_r_n(count_t n)
{
assert(n <= branches<B>);
auto mp = heap::allocate(sizeof_inner_r_n(n));
auto mr = static_cast<void*>(nullptr);
if (embed_relaxed) {
mr = reinterpret_cast<unsigned char*>(mp) + sizeof_inner_n(n);
} else {
try {
mr = heap::allocate(sizeof_relaxed_n(n), norefs_tag{});
} catch (...) {
heap::deallocate(sizeof_inner_r_n(n), mp);
throw;
}
}
auto p = new (mp) node_t;
auto r = new (mr) relaxed_t;
r->d.count = 0;
p->impl.d.data.inner.relaxed = r;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
}
static node_t* make_inner_sr_n(count_t n, relaxed_t* r)
{
return static_if<embed_relaxed, node_t*>(
[&](auto) { return node_t::make_inner_r_n(n); },
[&](auto) {
auto p =
new (heap::allocate(node_t::sizeof_inner_r_n(n))) node_t;
assert(r->d.count >= n);
node_t::refs(r).inc();
p->impl.d.data.inner.relaxed = r;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
});
}
static node_t* make_inner_r_e(edit_t e)
{
auto mp = heap::allocate(max_sizeof_inner_r);
auto mr = static_cast<void*>(nullptr);
if (embed_relaxed) {
mr = reinterpret_cast<unsigned char*>(mp) + max_sizeof_inner;
} else {
try {
mr = heap::allocate(max_sizeof_relaxed, norefs_tag{});
} catch (...) {
heap::deallocate(max_sizeof_inner_r, mp);
throw;
}
}
auto p = new (mp) node_t;
auto r = new (mr) relaxed_t;
ownee(p) = e;
static_if<!embed_relaxed>([&](auto) { node_t::ownee(r) = e; });
r->d.count = 0;
p->impl.d.data.inner.relaxed = r;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
}
static node_t* make_inner_sr_e(edit_t e, relaxed_t* r)
{
return static_if<embed_relaxed, node_t*>(
[&](auto) { return node_t::make_inner_r_e(e); },
[&](auto) {
auto p =
new (heap::allocate(node_t::max_sizeof_inner_r)) node_t;
node_t::refs(r).inc();
p->impl.d.data.inner.relaxed = r;
node_t::ownee(p) = e;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::inner;
#endif
return p;
});
}
static node_t* make_leaf_n(count_t n)
{
assert(n <= branches<BL>);
auto p = new (heap::allocate(sizeof_leaf_n(n))) node_t;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::leaf;
#endif
return p;
}
static node_t* make_leaf_e(edit_t e)
{
auto p = new (heap::allocate(max_sizeof_leaf)) node_t;
ownee(p) = e;
#if IMMER_TAGGED_NODE
p->impl.d.kind = node_t::kind_t::leaf;
#endif
return p;
}
static node_t* make_inner_n(count_t n, node_t* x)
{
assert(n >= 1);
auto p = make_inner_n(n);
p->inner()[0] = x;
return p;
}
static node_t* make_inner_n(edit_t n, node_t* x)
{
assert(n >= 1);
auto p = make_inner_n(n);
p->inner()[0] = x;
return p;
}
static node_t* make_inner_n(count_t n, node_t* x, node_t* y)
{
assert(n >= 2);
auto p = make_inner_n(n);
p->inner()[0] = x;
p->inner()[1] = y;
return p;
}
static node_t* make_inner_r_n(count_t n, node_t* x)
{
assert(n >= 1);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
r->d.count = 1;
return p;
}
static node_t* make_inner_r_n(count_t n, node_t* x, size_t xs)
{
assert(n >= 1);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
r->d.sizes[0] = xs;
r->d.count = 1;
return p;
}
static node_t* make_inner_r_n(count_t n, node_t* x, node_t* y)
{
assert(n >= 2);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
p->inner()[1] = y;
r->d.count = 2;
return p;
}
static node_t* make_inner_r_n(count_t n, node_t* x, size_t xs, node_t* y)
{
assert(n >= 2);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
p->inner()[1] = y;
r->d.sizes[0] = xs;
r->d.count = 2;
return p;
}
static node_t*
make_inner_r_n(count_t n, node_t* x, size_t xs, node_t* y, size_t ys)
{
assert(n >= 2);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
p->inner()[1] = y;
r->d.sizes[0] = xs;
r->d.sizes[1] = xs + ys;
r->d.count = 2;
return p;
}
static node_t* make_inner_r_n(count_t n,
node_t* x,
size_t xs,
node_t* y,
size_t ys,
node_t* z,
size_t zs)
{
assert(n >= 3);
auto p = make_inner_r_n(n);
auto r = p->relaxed();
p->inner()[0] = x;
p->inner()[1] = y;
p->inner()[2] = z;
r->d.sizes[0] = xs;
r->d.sizes[1] = xs + ys;
r->d.sizes[2] = xs + ys + zs;
r->d.count = 3;
return p;
}
template <typename U>
static node_t* make_leaf_n(count_t n, U&& x)
{
assert(n >= 1);
auto p = make_leaf_n(n);
try {
new (p->leaf()) T{std::forward<U>(x)};
} catch (...) {
heap::deallocate(node_t::sizeof_leaf_n(n), p);
throw;
}
return p;
}
template <typename U>
static node_t* make_leaf_e(edit_t e, U&& x)
{
auto p = make_leaf_e(e);
try {
new (p->leaf()) T{std::forward<U>(x)};
} catch (...) {
heap::deallocate(node_t::max_sizeof_leaf, p);
throw;
}
return p;
}
static node_t* make_path(shift_t shift, node_t* node)
{
IMMER_ASSERT_TAGGED(node->kind() == kind_t::leaf);
if (shift == endshift<B, BL>)
return node;
else {
auto n = node_t::make_inner_n(1);
try {
n->inner()[0] = make_path(shift - B, node);
} catch (...) {
heap::deallocate(node_t::sizeof_inner_n(1), n);
throw;
}
return n;
}
}
static node_t* make_path_e(edit_t e, shift_t shift, node_t* node)
{
IMMER_ASSERT_TAGGED(node->kind() == kind_t::leaf);
if (shift == endshift<B, BL>)
return node;
else {
auto n = node_t::make_inner_e(e);
try {
n->inner()[0] = make_path_e(e, shift - B, node);
} catch (...) {
heap::deallocate(node_t::max_sizeof_inner, n);
throw;
}
return n;
}
}
// Copies the first `n` children of inner node `src` into a new inner
// node with exactly `n` slots, bumping each child's reference count.
static node_t* copy_inner(node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_n(n);
    inc_nodes(src->inner(), n);
    std::uninitialized_copy(src->inner(), src->inner() + n, dst->inner());
    return dst;
}
// Like copy_inner(), but the new node has capacity `allocn` (>= n).
static node_t* copy_inner_n(count_t allocn, node_t* src, count_t n)
{
    assert(allocn >= n);
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_n(allocn);
    return do_copy_inner(dst, src, n);
}
// Transient variant: the copy is owned by edit token `e`.
static node_t* copy_inner_e(edit_t e, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_e(e);
    return do_copy_inner(dst, src, n);
}
// Shared tail of the copy_inner_*() helpers: references and copies the
// first `n` children of `src` into the freshly allocated `dst`.
static node_t* do_copy_inner(node_t* dst, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(dst->kind() == kind_t::inner);
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto p = src->inner();
    inc_nodes(p, n);
    std::uninitialized_copy(p, p + n, dst->inner());
    return dst;
}
// Relaxed-node counterparts of the copy_inner_*() family.  They also
// carry over the cumulative size table of the source node.
static node_t* copy_inner_r(node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_r_n(n);
    return do_copy_inner_r(dst, src, n);
}
// Like copy_inner_r(), but the copy has capacity `allocn` (>= n).
static node_t* copy_inner_r_n(count_t allocn, node_t* src, count_t n)
{
    assert(allocn >= n);
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_r_n(allocn);
    return do_copy_inner_r(dst, src, n);
}
// Transient variant: the copy is owned by edit token `e`.
static node_t* copy_inner_r_e(edit_t e, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_r_e(e);
    return do_copy_inner_r(dst, src, n);
}
// Transient variant that may share the source's relaxed-size table
// (via make_inner_sr_e) instead of copying it.
static node_t* copy_inner_sr_e(edit_t e, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto dst = make_inner_sr_e(e, src->relaxed());
    return do_copy_inner_sr(dst, src, n);
}
// Copies children (bumping their reference counts) and the size table
// from `src` into `dst`.
static node_t* do_copy_inner_r(node_t* dst, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(dst->kind() == kind_t::inner);
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::inner);
    auto src_r = src->relaxed();
    auto dst_r = dst->relaxed();
    inc_nodes(src->inner(), n);
    std::copy(src->inner(), src->inner() + n, dst->inner());
    std::copy(src_r->d.sizes, src_r->d.sizes + n, dst_r->d.sizes);
    dst_r->d.count = n;
    return dst;
}
// Like do_copy_inner_r(), but when the relaxed table lives in a
// separate shared allocation (!embed_relaxed) it is reused by `dst`
// rather than copied.
static node_t* do_copy_inner_sr(node_t* dst, node_t* src, count_t n)
{
    if (embed_relaxed)
        return do_copy_inner_r(dst, src, n);
    else {
        inc_nodes(src->inner(), n);
        std::copy(src->inner(), src->inner() + n, dst->inner());
        return dst;
    }
}
// Copies the first `n` elements of leaf `src` into a new leaf of
// exactly `n` slots.  std::uninitialized_copy destroys any partially
// constructed elements itself if a copy throws, so the catch block
// only needs to release the node's memory.
static node_t* copy_leaf(node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
    auto dst = make_leaf_n(n);
    try {
        std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
    } catch (...) {
        heap::deallocate(node_t::sizeof_leaf_n(n), dst);
        throw;
    }
    return dst;
}
// Transient variant: copies into a maximum-capacity leaf owned by edit
// token `e`.
static node_t* copy_leaf_e(edit_t e, node_t* src, count_t n)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
    auto dst = make_leaf_e(e);
    try {
        std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
    } catch (...) {
        heap::deallocate(node_t::max_sizeof_leaf, dst);
        throw;
    }
    return dst;
}
// Like copy_leaf(), but the new leaf has capacity `allocn` (>= n).
static node_t* copy_leaf_n(count_t allocn, node_t* src, count_t n)
{
    assert(allocn >= n);
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
    auto dst = make_leaf_n(allocn);
    try {
        std::uninitialized_copy(src->leaf(), src->leaf() + n, dst->leaf());
    } catch (...) {
        heap::deallocate(node_t::sizeof_leaf_n(allocn), dst);
        throw;
    }
    return dst;
}
// Concatenates the first `n1` elements of `src1` and the first `n2`
// elements of `src2` into a new leaf of exactly n1 + n2 slots.  If
// copying the second range throws, the elements already copied from
// the first range must be destroyed explicitly (uninitialized_copy
// only cleans up its own partially copied range) before releasing
// the node.
static node_t* copy_leaf(node_t* src1, count_t n1, node_t* src2, count_t n2)
{
    IMMER_ASSERT_TAGGED(src1->kind() == kind_t::leaf);
    IMMER_ASSERT_TAGGED(src2->kind() == kind_t::leaf);
    auto dst = make_leaf_n(n1 + n2);
    try {
        std::uninitialized_copy(
            src1->leaf(), src1->leaf() + n1, dst->leaf());
    } catch (...) {
        heap::deallocate(node_t::sizeof_leaf_n(n1 + n2), dst);
        throw;
    }
    try {
        std::uninitialized_copy(
            src2->leaf(), src2->leaf() + n2, dst->leaf() + n1);
    } catch (...) {
        destroy_n(dst->leaf(), n1);
        heap::deallocate(node_t::sizeof_leaf_n(n1 + n2), dst);
        throw;
    }
    return dst;
}
// Transient variant of the two-source copy_leaf(): the result is a
// maximum-capacity leaf owned by edit token `e`.  Same two-phase
// exception-safety pattern as above.
static node_t*
copy_leaf_e(edit_t e, node_t* src1, count_t n1, node_t* src2, count_t n2)
{
    IMMER_ASSERT_TAGGED(src1->kind() == kind_t::leaf);
    IMMER_ASSERT_TAGGED(src2->kind() == kind_t::leaf);
    auto dst = make_leaf_e(e);
    try {
        std::uninitialized_copy(
            src1->leaf(), src1->leaf() + n1, dst->leaf());
    } catch (...) {
        // node_t:: qualification for consistency with the sibling
        // helpers (make_leaf_e, copy_leaf_e above).
        heap::deallocate(node_t::max_sizeof_leaf, dst);
        throw;
    }
    try {
        std::uninitialized_copy(
            src2->leaf(), src2->leaf() + n2, dst->leaf() + n1);
    } catch (...) {
        destroy_n(dst->leaf(), n1);
        heap::deallocate(node_t::max_sizeof_leaf, dst);
        throw;
    }
    return dst;
}
// Copies the element range [idx, last) of leaf `src` into a new
// transient maximum-capacity leaf owned by edit token `e`.
static node_t* copy_leaf_e(edit_t e, node_t* src, count_t idx, count_t last)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
    auto dst = make_leaf_e(e);
    try {
        std::uninitialized_copy(
            src->leaf() + idx, src->leaf() + last, dst->leaf());
    } catch (...) {
        // node_t:: qualification for consistency with the sibling
        // helpers (make_leaf_e, copy_leaf_e above).
        heap::deallocate(node_t::max_sizeof_leaf, dst);
        throw;
    }
    return dst;
}
// Copies the element range [idx, last) of leaf `src` into a new leaf
// of exactly last - idx slots.
static node_t* copy_leaf(node_t* src, count_t idx, count_t last)
{
    IMMER_ASSERT_TAGGED(src->kind() == kind_t::leaf);
    auto dst = make_leaf_n(last - idx);
    try {
        std::uninitialized_copy(
            src->leaf() + idx, src->leaf() + last, dst->leaf());
    } catch (...) {
        heap::deallocate(node_t::sizeof_leaf_n(last - idx), dst);
        throw;
    }
    return dst;
}
// Copies the `n` elements of `src` into a new leaf with one extra
// slot and constructs `x` in that slot.  If constructing the new
// element throws, the already-copied prefix is destroyed and the
// node released before re-throwing.
template <typename U>
static node_t* copy_leaf_emplace(node_t* src, count_t n, U&& x)
{
    auto dst = copy_leaf_n(n + 1, src, n);
    try {
        new (dst->leaf() + n) T{std::forward<U>(x)};
    } catch (...) {
        destroy_n(dst->leaf(), n);
        heap::deallocate(node_t::sizeof_leaf_n(n + 1), dst);
        throw;
    }
    return dst;
}
// Frees a regular (non-relaxed) inner node holding `n` children.  A
// node owned by a transience edit was allocated with maximum
// capacity, so the matching size must be passed when deallocating.
static void delete_inner(node_t* p, count_t n)
{
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
    assert(!p->relaxed());
    heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_inner
                                      : node_t::sizeof_inner_n(n),
                     p);
}
// Frees a transient regular inner node (always maximum capacity).
static void delete_inner_e(node_t* p)
{
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
    assert(!p->relaxed());
    heap::deallocate(node_t::max_sizeof_inner, p);
}
// Dispatches to the relaxed or regular deleter depending on the
// actual layout of `p`.
static void delete_inner_any(node_t* p, count_t n)
{
    if (p->relaxed())
        delete_inner_r(p, n);
    else
        delete_inner(p, n);
}
// Frees a relaxed inner node holding `n` children.  When the relaxed
// size table lives in a separate allocation (!embed_relaxed) it is
// reference counted on its own and is only released once the last
// node sharing it is deleted.
static void delete_inner_r(node_t* p, count_t n)
{
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
    auto r = p->relaxed();
    assert(r);
    static_if<!embed_relaxed>([&](auto) {
        if (node_t::refs(r).dec())
            heap::deallocate(node_t::ownee(r).owned()
                                 ? node_t::max_sizeof_relaxed
                                 : node_t::sizeof_relaxed_n(n),
                             r);
    });
    heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_inner_r
                                      : node_t::sizeof_inner_r_n(n),
                     p);
}
// Transient variant of delete_inner_r(): both the node and its
// relaxed table use maximum-capacity layouts.
static void delete_inner_r_e(node_t* p)
{
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
    auto r = p->relaxed();
    assert(r);
    static_if<!embed_relaxed>([&](auto) {
        if (node_t::refs(r).dec())
            heap::deallocate(node_t::max_sizeof_relaxed, r);
    });
    heap::deallocate(node_t::max_sizeof_inner_r, p);
}
// Destroys the `n` elements of leaf `p` and frees its memory, taking
// the transient (max-capacity) layout into account.
static void delete_leaf(node_t* p, count_t n)
{
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::leaf);
    destroy_n(p->leaf(), n);
    heap::deallocate(ownee(p).owned() ? node_t::max_sizeof_leaf
                                      : node_t::sizeof_leaf_n(n),
                     p);
}
// A node may be mutated in place when it is not shared (unique
// reference) or when it is owned by the given transience edit.
bool can_mutate(edit_t e) const
{
    return refs(this).unique() || ownee(this).can_mutate(e);
}
// Whether this node can carry a relaxed-size table: always true when
// the table is embedded, otherwise only if one is attached.
bool can_relax() const { return !embed_relaxed || relaxed(); }
// Returns a relaxed-size table that is safe to mutate under edit `e`.
// When the table is embedded in the node it is returned directly;
// otherwise a shared table is replaced by a fresh allocation owned by
// `e` (dropping this node's reference to the old one).
relaxed_t* ensure_mutable_relaxed(edit_t e)
{
    auto src_r = relaxed();
    return static_if<embed_relaxed, relaxed_t*>(
        [&](auto) { return src_r; },
        [&](auto) {
            if (node_t::refs(src_r).unique() ||
                node_t::ownee(src_r).can_mutate(e))
                return src_r;
            else {
                if (src_r)
                    node_t::refs(src_r).dec_unsafe();
                auto dst_r = impl.d.data.inner.relaxed =
                    new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
                node_t::ownee(dst_r) = e;
                return dst_r;
            }
        });
}
// Like ensure_mutable_relaxed(), but checks mutability against edit
// `e` while tagging the resulting table with edit `ec`.
relaxed_t* ensure_mutable_relaxed_e(edit_t e, edit_t ec)
{
    auto src_r = relaxed();
    return static_if<embed_relaxed, relaxed_t*>(
        [&](auto) { return src_r; },
        [&](auto) {
            if (src_r && (node_t::refs(src_r).unique() ||
                          node_t::ownee(src_r).can_mutate(e))) {
                node_t::ownee(src_r) = ec;
                return src_r;
            } else {
                if (src_r)
                    node_t::refs(src_r).dec_unsafe();
                auto dst_r = impl.d.data.inner.relaxed =
                    new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
                node_t::ownee(dst_r) = ec;
                return dst_r;
            }
        });
}
// Like ensure_mutable_relaxed(), but when a fresh table is needed the
// first `n` size entries of the old table are carried over.
relaxed_t* ensure_mutable_relaxed_n(edit_t e, count_t n)
{
    auto src_r = relaxed();
    return static_if<embed_relaxed, relaxed_t*>(
        [&](auto) { return src_r; },
        [&](auto) {
            if (node_t::refs(src_r).unique() ||
                node_t::ownee(src_r).can_mutate(e))
                return src_r;
            else {
                if (src_r)
                    node_t::refs(src_r).dec_unsafe();
                auto dst_r =
                    new (heap::allocate(max_sizeof_relaxed)) relaxed_t;
                std::copy(
                    src_r->d.sizes, src_r->d.sizes + n, dst_r->d.sizes);
                node_t::ownee(dst_r) = e;
                return impl.d.data.inner.relaxed = dst_r;
            }
        });
}
// Increments this node's reference count and returns the node itself,
// which allows chaining (e.g. `root->inc()`).
node_t* inc()
{
    refs(this).inc();
    return this;
}
const node_t* inc() const
{
    refs(this).inc();
    return this;
}
// Decrements the reference count; returns true when it reached zero
// and the node should be freed by the caller.
bool dec() const { return refs(this).dec(); }
// Decrement that assumes the count cannot reach zero here.
void dec_unsafe() const { refs(this).dec_unsafe(); }
// Bumps the reference count of each of the `n` nodes in the array `p`.
static void inc_nodes(node_t** p, count_t n)
{
    auto end = p + n;
    while (p != end)
        refs(*p++).inc();
}
#if IMMER_TAGGED_NODE
// Debug helper: recomputes the height (shift) of the subtree rooted
// at this node by walking down its first children.
shift_t compute_shift()
{
    if (kind() == kind_t::leaf)
        return endshift<B, BL>;
    else
        return B + inner()[0]->compute_shift();
}
#endif
// Deep structural invariant check for the subtree rooted here.  Only
// active when IMMER_DEBUG_DEEP_CHECK is set; always returns true so
// it can be used inside assert().
bool check(shift_t shift, size_t size)
{
#if IMMER_DEBUG_DEEP_CHECK
    assert(size > 0);
    if (shift == endshift<B, BL>) {
        // Bottom level: must be a leaf of at most branches<BL> elements.
        IMMER_ASSERT_TAGGED(kind() == kind_t::leaf);
        assert(size <= branches<BL>);
    } else if (auto r = relaxed()) {
        auto count = r->d.count;
        assert(count > 0);
        assert(count <= branches<B>);
        if (r->d.sizes[count - 1] != size) {
            IMMER_TRACE_F("check");
            IMMER_TRACE_E(r->d.sizes[count - 1]);
            IMMER_TRACE_E(size);
        }
        // The last cumulative size must equal the subtree size, and
        // the size table must be strictly increasing.
        assert(r->d.sizes[count - 1] == size);
        for (auto i = 1; i < count; ++i)
            assert(r->d.sizes[i - 1] < r->d.sizes[i]);
        auto last_size = size_t{};
        for (auto i = 0; i < count; ++i) {
            assert(inner()[i]->check(shift - B, r->d.sizes[i] - last_size));
            last_size = r->d.sizes[i];
        }
    } else {
        // Regular inner node: all children but the last are full.
        assert(size <= branches<B> << shift);
        auto count =
            (size >> shift) + (size - ((size >> shift) << shift) > 0);
        assert(count <= branches<B>);
        if (count) {
            for (auto i = 1; i < count - 1; ++i)
                assert(inner()[i]->check(shift - B, 1 << shift));
            assert(inner()[count - 1]->check(
                shift - B, size - ((count - 1) << shift)));
        }
    }
#endif // IMMER_DEBUG_DEEP_CHECK
    return true;
}
};
// Derives a leaf branching factor BL so that a packed leaf occupies
// about as much memory as an inner node: fit as many T elements as
// possible in the space of an inner node and round down to a power
// of two.
template <typename T, typename MP, bits_t B>
constexpr bits_t derive_bits_leaf_aux()
{
    using node_t = node<T, MP, B, B>;
    constexpr auto sizeof_elem = sizeof(T);
    constexpr auto space =
        node_t::max_sizeof_inner - node_t::sizeof_packed_leaf_n(0);
    constexpr auto full_elems = space / sizeof_elem;
    constexpr auto BL = log2(full_elems);
    return BL;
}
// Default leaf bits for a given element type, memory policy and B.
template <typename T, typename MP, bits_t B>
constexpr bits_t derive_bits_leaf = derive_bits_leaf_aux<T, MP, B>();
} // namespace rbts
} // namespace detail
} // namespace immer

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,509 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/rbts/node.hpp>
#include <immer/detail/rbts/operations.hpp>
#include <immer/detail/rbts/position.hpp>
#include <immer/detail/type_traits.hpp>
#include <cassert>
#include <memory>
#include <numeric>
namespace immer {
namespace detail {
namespace rbts {
/*!
 * Regular radix balanced tree.  All elements except a trailing
 * partial leaf live in the radix tree `root`; the trailing leaf is
 * kept aside in `tail`, which makes push_back amortized O(1).
 * Instances share nodes via reference counting; `*_mut` operations
 * mutate in place only nodes owned by the given transience edit.
 */
template <typename T, typename MemoryPolicy, bits_t B, bits_t BL>
struct rbtree
{
    using node_t = node<T, MemoryPolicy, B, BL>;
    using edit_t = typename node_t::edit_t;
    using owner_t = typename MemoryPolicy::transience_t::owner;

    size_t size;   // total number of elements (tree + tail)
    shift_t shift; // height of root, expressed as a radix bit shift
    node_t* root;  // inner tree; excludes the tail leaf
    node_t* tail;  // trailing leaf buffer

    // Canonical empty tree, shared by every empty container.
    static const rbtree& empty()
    {
        static const rbtree empty_{
            0, BL, node_t::make_inner_n(0u), node_t::make_leaf_n(0u)};
        return empty_;
    }

    // Builds a tree by transiently pushing every element of `values`.
    template <typename U>
    static auto from_initializer_list(std::initializer_list<U> values)
    {
        auto e = owner_t{};
        auto result = rbtree{empty()};
        for (auto&& v : values)
            result.push_back_mut(e, v);
        return result;
    }

    // Builds a tree from an iterator/sentinel pair.
    template <typename Iter,
              typename Sent,
              std::enable_if_t<compatible_sentinel_v<Iter, Sent>, bool> = true>
    static auto from_range(Iter first, Sent last)
    {
        auto e = owner_t{};
        auto result = rbtree{empty()};
        for (; first != last; ++first)
            result.push_back_mut(e, *first);
        return result;
    }

    // Builds a tree with `n` copies of `v`.
    static auto from_fill(size_t n, T v)
    {
        auto e = owner_t{};
        auto result = rbtree{empty()};
        while (n-- > 0)
            result.push_back_mut(e, v);
        return result;
    }

    // Takes ownership of the given nodes (no reference is added).
    rbtree(size_t sz, shift_t sh, node_t* r, node_t* t)
        : size{sz}
        , shift{sh}
        , root{r}
        , tail{t}
    {
        assert(check_tree());
    }

    // Copy: shares structure, bumping root and tail references.
    rbtree(const rbtree& other)
        : rbtree{other.size, other.shift, other.root, other.tail}
    {
        inc();
    }

    // Move: start from empty and steal the other tree's contents.
    rbtree(rbtree&& other)
        : rbtree{empty()}
    {
        swap(*this, other);
    }

    // Copy-and-swap assignment.
    rbtree& operator=(const rbtree& other)
    {
        auto next = other;
        swap(*this, next);
        return *this;
    }

    rbtree& operator=(rbtree&& other)
    {
        swap(*this, other);
        return *this;
    }

    friend void swap(rbtree& x, rbtree& y)
    {
        using std::swap;
        swap(x.size, y.size);
        swap(x.shift, y.shift);
        swap(x.root, y.root);
        swap(x.tail, y.tail);
    }

    ~rbtree() { dec(); }

    // Adds one reference to both owned nodes.
    void inc() const
    {
        root->inc();
        tail->inc();
    }

    // Releases the whole tree via the dec visitor.
    void dec() const { traverse(dec_visitor()); }

    // Number of elements stored in the tail leaf (1..branches<BL>,
    // or 0 for the empty tree).
    auto tail_size() const { return size ? ((size - 1) & mask<BL>) + 1 : 0; }
    // Index of the first element held by the tail.
    auto tail_offset() const { return size ? (size - 1) & ~mask<BL> : 0; }

    // Visits the whole tree (root part, then the tail).
    template <typename Visitor, typename... Args>
    void traverse(Visitor v, Args&&... args) const
    {
        auto tail_off = tail_offset();
        auto tail_size = size - tail_off;
        if (tail_off)
            make_regular_sub_pos(root, shift, tail_off).visit(v, args...);
        else
            make_empty_regular_pos(root).visit(v, args...);
        make_leaf_sub_pos(tail, tail_size).visit(v, args...);
    }

    // Visits only the index range [first, last).
    template <typename Visitor, typename... Args>
    void traverse(Visitor v, size_t first, size_t last, Args&&... args) const
    {
        auto tail_off = tail_offset();
        auto tail_size = size - tail_off;
        if (first < tail_off)
            make_regular_sub_pos(root, shift, tail_off)
                .visit(v, first, last < tail_off ? last : tail_off, args...);
        if (last > tail_off)
            make_leaf_sub_pos(tail, tail_size)
                .visit(v,
                       first > tail_off ? first - tail_off : 0,
                       last - tail_off,
                       args...);
    }

    // Like traverse(), but the visitor may abort early by returning
    // false; returns whether the traversal ran to completion.
    template <typename Visitor, typename... Args>
    bool traverse_p(Visitor v, Args&&... args) const
    {
        auto tail_off = tail_offset();
        auto tail_size = size - tail_off;
        return (tail_off ? make_regular_sub_pos(root, shift, tail_off)
                               .visit(v, args...)
                         : make_empty_regular_pos(root).visit(v, args...)) &&
               make_leaf_sub_pos(tail, tail_size).visit(v, args...);
    }

    // Early-abort traversal over the index range [first, last).
    template <typename Visitor, typename... Args>
    bool traverse_p(Visitor v, size_t first, size_t last, Args&&... args) const
    {
        auto tail_off = tail_offset();
        auto tail_size = size - tail_off;
        return (first < tail_off ? make_regular_sub_pos(root, shift, tail_off)
                                       .visit(v,
                                              first,
                                              last < tail_off ? last : tail_off,
                                              args...)
                                 : true) &&
               (last > tail_off
                    ? make_leaf_sub_pos(tail, tail_size)
                          .visit(v,
                                 first > tail_off ? first - tail_off : 0,
                                 last - tail_off,
                                 args...)
                    : true);
    }

    // Descends to the leaf containing index `idx`, dispatching to the
    // tail when the index falls past the tail offset.
    template <typename Visitor>
    decltype(auto) descend(Visitor v, size_t idx) const
    {
        auto tail_off = tail_offset();
        return idx >= tail_off ? make_leaf_descent_pos(tail).visit(v, idx)
                               : visit_regular_descent(root, shift, v, idx);
    }

    // Invokes `fn(first, last)` for every contiguous chunk of data.
    template <typename Fn>
    void for_each_chunk(Fn&& fn) const
    {
        traverse(for_each_chunk_visitor{}, std::forward<Fn>(fn));
    }

    template <typename Fn>
    void for_each_chunk(size_t first, size_t last, Fn&& fn) const
    {
        traverse(for_each_chunk_i_visitor{}, first, last, std::forward<Fn>(fn));
    }

    // Early-abort chunked iteration; stops when `fn` returns false.
    template <typename Fn>
    bool for_each_chunk_p(Fn&& fn) const
    {
        return traverse_p(for_each_chunk_p_visitor{}, std::forward<Fn>(fn));
    }

    template <typename Fn>
    bool for_each_chunk_p(size_t first, size_t last, Fn&& fn) const
    {
        return traverse_p(
            for_each_chunk_p_i_visitor{}, first, last, std::forward<Fn>(fn));
    }

    // Structural element-wise equality against another tree.
    bool equals(const rbtree& other) const
    {
        if (size != other.size)
            return false;
        if (size == 0)
            return true;
        return (size <= branches<BL> ||
                make_regular_sub_pos(root, shift, tail_offset())
                    .visit(equals_visitor{}, other.root)) &&
               make_leaf_sub_pos(tail, tail_size())
                   .visit(equals_visitor{}, other.tail);
    }

    // Makes sure the tail leaf (holding `n` elements) is mutable
    // under edit `e`, copying it if it is shared.
    void ensure_mutable_tail(edit_t e, count_t n)
    {
        if (!tail->can_mutate(e)) {
            auto new_tail = node_t::copy_leaf_e(e, tail, n);
            dec_leaf(tail, n);
            tail = new_tail;
        }
    }

    // Transient push_back: appends into the tail if there is room,
    // otherwise pushes the full tail into the tree and starts a new
    // one.  Exception-safe: the fresh tail is released on throw.
    void push_back_mut(edit_t e, T value)
    {
        auto tail_off = tail_offset();
        auto ts = size - tail_off;
        if (ts < branches<BL>) {
            ensure_mutable_tail(e, ts);
            new (&tail->leaf()[ts]) T{std::move(value)};
        } else {
            auto new_tail = node_t::make_leaf_e(e, std::move(value));
            try {
                if (tail_off == size_t{branches<B>} << shift) {
                    // Root is full: grow the tree one level.
                    auto new_root = node_t::make_inner_e(e);
                    try {
                        auto path = node_t::make_path_e(e, shift, tail);
                        new_root->inner()[0] = root;
                        new_root->inner()[1] = path;
                        root = new_root;
                        tail = new_tail;
                        shift += B;
                    } catch (...) {
                        node_t::delete_inner_e(new_root);
                        throw;
                    }
                } else if (tail_off) {
                    // Push the old tail down an existing branch.
                    auto new_root =
                        make_regular_sub_pos(root, shift, tail_off)
                            .visit(push_tail_mut_visitor<node_t>{}, e, tail);
                    root = new_root;
                    tail = new_tail;
                } else {
                    // Tree was empty: old tail becomes the whole tree.
                    auto new_root = node_t::make_path_e(e, shift, tail);
                    assert(tail_off == 0);
                    dec_empty_regular(root);
                    root = new_root;
                    tail = new_tail;
                }
            } catch (...) {
                node_t::delete_leaf(new_tail, 1);
                throw;
            }
        }
        ++size;
    }

    // Persistent push_back: returns a new tree sharing structure with
    // this one.  Mirrors the three cases of push_back_mut().
    rbtree push_back(T value) const
    {
        auto tail_off = tail_offset();
        auto ts = size - tail_off;
        if (ts < branches<BL>) {
            auto new_tail =
                node_t::copy_leaf_emplace(tail, ts, std::move(value));
            return {size + 1, shift, root->inc(), new_tail};
        } else {
            auto new_tail = node_t::make_leaf_n(1, std::move(value));
            try {
                if (tail_off == size_t{branches<B>} << shift) {
                    auto new_root = node_t::make_inner_n(2);
                    try {
                        auto path = node_t::make_path(shift, tail);
                        new_root->inner()[0] = root;
                        new_root->inner()[1] = path;
                        root->inc();
                        tail->inc();
                        return {size + 1, shift + B, new_root, new_tail};
                    } catch (...) {
                        node_t::delete_inner(new_root, 2);
                        throw;
                    }
                } else if (tail_off) {
                    auto new_root =
                        make_regular_sub_pos(root, shift, tail_off)
                            .visit(push_tail_visitor<node_t>{}, tail);
                    tail->inc();
                    return {size + 1, shift, new_root, new_tail};
                } else {
                    auto new_root = node_t::make_path(shift, tail);
                    tail->inc();
                    return {size + 1, shift, new_root, new_tail};
                }
            } catch (...) {
                node_t::delete_leaf(new_tail, 1);
                throw;
            }
        }
    }

    // Pointer to the leaf chunk containing `index`.
    const T* array_for(size_t index) const
    {
        return descend(array_for_visitor<T>(), index);
    }

    // Mutable reference to the element at `idx`, copying any shared
    // nodes on the path under edit `e`.
    T& get_mut(edit_t e, size_t idx)
    {
        auto tail_off = tail_offset();
        if (idx >= tail_off) {
            ensure_mutable_tail(e, size - tail_off);
            return tail->leaf()[idx & mask<BL>];
        } else {
            return make_regular_sub_pos(root, shift, tail_off)
                .visit(get_mut_visitor<node_t>{}, idx, e, &root);
        }
    }

    // Unchecked element access.
    const T& get(size_t index) const
    {
        return descend(get_visitor<T>(), index);
    }

    // Bounds-checked element access.
    const T& get_check(size_t index) const
    {
        if (index >= size)
            throw std::out_of_range{"index out of range"};
        return descend(get_visitor<T>(), index);
    }

    const T& front() const { return get(0); }
    // The last element always lives in the tail.
    const T& back() const { return tail->leaf()[(size - 1) & mask<BL>]; }

    // Transiently replaces the element at `idx` by `fn(old)`.
    template <typename FnT>
    void update_mut(edit_t e, size_t idx, FnT&& fn)
    {
        auto& elem = get_mut(e, idx);
        elem = std::forward<FnT>(fn)(std::move(elem));
    }

    // Persistently replaces the element at `idx` by `fn(old)`,
    // copying only the path to the affected leaf.
    template <typename FnT>
    rbtree update(size_t idx, FnT&& fn) const
    {
        auto tail_off = tail_offset();
        if (idx >= tail_off) {
            auto tail_size = size - tail_off;
            auto new_tail =
                make_leaf_sub_pos(tail, tail_size)
                    .visit(update_visitor<node_t>{}, idx - tail_off, fn);
            return {size, shift, root->inc(), new_tail};
        } else {
            auto new_root = make_regular_sub_pos(root, shift, tail_off)
                                .visit(update_visitor<node_t>{}, idx, fn);
            return {size, shift, new_root, tail->inc()};
        }
    }

    void assoc_mut(edit_t e, size_t idx, T value)
    {
        update_mut(e, idx, [&](auto&&) { return std::move(value); });
    }

    rbtree assoc(size_t idx, T value) const
    {
        return update(idx, [&](auto&&) { return std::move(value); });
    }

    // Persistent prefix: keeps only the first `new_size` elements.
    rbtree take(size_t new_size) const
    {
        auto tail_off = tail_offset();
        if (new_size == 0) {
            return empty();
        } else if (new_size >= size) {
            return *this;
        } else if (new_size > tail_off) {
            // Cut happens inside the tail: trim the tail only.
            auto new_tail = node_t::copy_leaf(tail, new_size - tail_off);
            return {new_size, shift, root->inc(), new_tail};
        } else {
            // Cut happens inside the tree: slice it, possibly
            // lowering its height, and produce a fresh tail.
            using std::get;
            auto l = new_size - 1;
            auto v = slice_right_visitor<node_t>();
            auto r = make_regular_sub_pos(root, shift, tail_off).visit(v, l);
            auto new_shift = get<0>(r);
            auto new_root = get<1>(r);
            auto new_tail = get<3>(r);
            if (new_root) {
                IMMER_ASSERT_TAGGED(new_root->compute_shift() == get<0>(r));
                assert(new_root->check(new_shift, new_size - get<2>(r)));
                return {new_size, new_shift, new_root, new_tail};
            } else {
                return {new_size, BL, empty().root->inc(), new_tail};
            }
        }
    }

    // Transient version of take().
    void take_mut(edit_t e, size_t new_size)
    {
        auto tail_off = tail_offset();
        if (new_size == 0) {
            // todo: more efficient?
            *this = empty();
        } else if (new_size >= size) {
            return;
        } else if (new_size > tail_off) {
            // Trim inside the tail, in place when possible.
            auto ts = size - tail_off;
            auto newts = new_size - tail_off;
            if (tail->can_mutate(e)) {
                destroy_n(tail->leaf() + newts, ts - newts);
            } else {
                auto new_tail = node_t::copy_leaf_e(e, tail, newts);
                dec_leaf(tail, ts);
                tail = new_tail;
            }
            size = new_size;
            return;
        } else {
            using std::get;
            auto l = new_size - 1;
            auto v = slice_right_mut_visitor<node_t>();
            auto r = make_regular_sub_pos(root, shift, tail_off).visit(v, l, e);
            auto new_shift = get<0>(r);
            auto new_root = get<1>(r);
            auto new_tail = get<3>(r);
            if (new_root) {
                root = new_root;
                shift = new_shift;
            } else {
                root = empty().root->inc();
                shift = BL;
            }
            dec_leaf(tail, size - tail_off);
            size = new_size;
            tail = new_tail;
            return;
        }
    }

    // Debug-only deep invariant checks; always true in release.
    bool check_tree() const
    {
#if IMMER_DEBUG_DEEP_CHECK
        assert(shift >= BL);
        assert(tail_offset() <= size);
        assert(check_root());
        assert(check_tail());
#endif
        return true;
    }

    bool check_tail() const
    {
#if IMMER_DEBUG_DEEP_CHECK
        if (tail_size() > 0)
            assert(tail->check(0, tail_size()));
#endif
        return true;
    }

    bool check_root() const
    {
#if IMMER_DEBUG_DEEP_CHECK
        if (tail_offset() > 0)
            assert(root->check(shift, tail_offset()));
        else {
            IMMER_ASSERT_TAGGED(root->kind() == node_t::kind_t::inner);
            assert(shift == BL);
        }
#endif
        return true;
    }
};
} // namespace rbts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,99 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/iterator_facade.hpp>
#include <immer/detail/rbts/rbtree.hpp>
namespace immer {
namespace detail {
namespace rbts {
// Random-access iterator over an rbtree.  Only the index is stored;
// the leaf chunk containing it is looked up lazily on dereference and
// cached (base_/curr_), so sequential iteration performs one tree
// descent per chunk rather than one per element.
template <typename T, typename MP, bits_t B, bits_t BL>
struct rbtree_iterator
    : iterator_facade<rbtree_iterator<T, MP, B, BL>,
                      std::random_access_iterator_tag,
                      T,
                      const T&,
                      std::ptrdiff_t,
                      const T*>
{
    using tree_t = rbtree<T, MP, B, BL>;

    struct end_t
    {};

    rbtree_iterator() = default;

    // Begin iterator.  base_ starts at a sentinel so the first
    // dereference always refreshes the cached chunk.
    rbtree_iterator(const tree_t& v)
        : v_{&v}
        , i_{0}
        , base_{~size_t{}}
        , curr_{nullptr}
    {}

    // End iterator (index == size).
    rbtree_iterator(const tree_t& v, end_t)
        : v_{&v}
        , i_{v.size}
        , base_{~size_t{}}
        , curr_{nullptr}
    {}

    const tree_t& impl() const { return *v_; }
    size_t index() const { return i_; }

private:
    friend iterator_core_access;

    const tree_t* v_;
    size_t i_;
    // Cache of the current leaf chunk: base_ is the index of its
    // first element, curr_ points at its data.
    mutable size_t base_;
    mutable const T* curr_ = nullptr;

    void increment()
    {
        assert(i_ < v_->size);
        ++i_;
    }

    void decrement()
    {
        assert(i_ > 0);
        --i_;
    }

    void advance(std::ptrdiff_t n)
    {
        assert(n <= 0 || i_ + static_cast<size_t>(n) <= v_->size);
        assert(n >= 0 || static_cast<size_t>(-n) <= i_);
        i_ += n;
    }

    bool equal(const rbtree_iterator& other) const { return i_ == other.i_; }

    std::ptrdiff_t distance_to(const rbtree_iterator& other) const
    {
        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
    }

    // Refreshes the cached chunk if the index left it, then indexes
    // into the chunk.
    const T& dereference() const
    {
        auto base = i_ & ~mask<BL>;
        if (base_ != base) {
            base_ = base;
            curr_ = v_->array_for(i_);
        }
        return curr_[i_ & mask<BL>];
    }
};
} // namespace rbts
} // namespace detail
} // namespace immer

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,98 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/iterator_facade.hpp>
#include <immer/detail/rbts/rrbtree.hpp>
namespace immer {
namespace detail {
namespace rbts {
// Random-access iterator over an rrbtree (relaxed tree).  Because
// chunk boundaries are irregular here, the cache is a region tuple
// (data pointer, first index, end index) refreshed on dereference
// whenever the index leaves the cached region.
template <typename T, typename MP, bits_t B, bits_t BL>
struct rrbtree_iterator
    : iterator_facade<rrbtree_iterator<T, MP, B, BL>,
                      std::random_access_iterator_tag,
                      T,
                      const T&,
                      std::ptrdiff_t,
                      const T*>
{
    using tree_t = rrbtree<T, MP, B, BL>;
    // (chunk data, index of first element, index past last element)
    using region_t = std::tuple<const T*, size_t, size_t>;

    struct end_t
    {};

    const tree_t& impl() const { return *v_; }
    size_t index() const { return i_; }

    rrbtree_iterator() = default;

    // Begin iterator; the sentinel region forces a refresh on the
    // first dereference.
    rrbtree_iterator(const tree_t& v)
        : v_{&v}
        , i_{0}
        , curr_{nullptr, ~size_t{}, ~size_t{}}
    {}

    // End iterator (index == size).
    rrbtree_iterator(const tree_t& v, end_t)
        : v_{&v}
        , i_{v.size}
        , curr_{nullptr, ~size_t{}, ~size_t{}}
    {}

private:
    friend iterator_core_access;

    const tree_t* v_;
    size_t i_;
    mutable region_t curr_;

    void increment()
    {
        using std::get;
        assert(i_ < v_->size);
        ++i_;
    }

    void decrement()
    {
        using std::get;
        assert(i_ > 0);
        --i_;
    }

    void advance(std::ptrdiff_t n)
    {
        using std::get;
        assert(n <= 0 || i_ + static_cast<size_t>(n) <= v_->size);
        assert(n >= 0 || static_cast<size_t>(-n) <= i_);
        i_ += n;
    }

    bool equal(const rrbtree_iterator& other) const { return i_ == other.i_; }

    std::ptrdiff_t distance_to(const rrbtree_iterator& other) const
    {
        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
    }

    // Refreshes the cached region when the index moved outside it.
    const T& dereference() const
    {
        using std::get;
        if (i_ < get<1>(curr_) || i_ >= get<2>(curr_))
            curr_ = v_->region_for(i_);
        return get<0>(curr_)[i_ - get<1>(curr_)];
    }
};
} // namespace rbts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,56 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <tuple>
#include <utility>
namespace immer {
namespace detail {
namespace rbts {
// CRTP base for tree visitors.  A derived visitor overrides any of
// the visit_* entry points; each level it does not implement falls
// back to a more generic handler (relaxed/regular -> inner -> node).
// Reaching visit_node without an override is a logic error, hence
// IMMER_UNREACHABLE.
template <typename Deriv>
struct visitor_base
{
    template <typename... Args>
    static decltype(auto) visit_node(Args&&... args)
    {
        IMMER_UNREACHABLE;
    }

    // Relaxed inner nodes default to the generic inner handler.
    template <typename... Args>
    static decltype(auto) visit_relaxed(Args&&... args)
    {
        return Deriv::visit_inner(std::forward<Args>(args)...);
    }

    // Regular inner nodes default to the generic inner handler.
    template <typename... Args>
    static decltype(auto) visit_regular(Args&&... args)
    {
        return Deriv::visit_inner(std::forward<Args>(args)...);
    }

    // Inner nodes default to the generic node handler.
    template <typename... Args>
    static decltype(auto) visit_inner(Args&&... args)
    {
        return Deriv::visit_node(std::forward<Args>(args)...);
    }

    // Leaves default to the generic node handler.
    template <typename... Args>
    static decltype(auto) visit_leaf(Args&&... args)
    {
        return Deriv::visit_node(std::forward<Args>(args)...);
    }
};
} // namespace rbts
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,36 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <atomic>
namespace immer {
namespace detail {
// Intrusive atomic reference count for use with intrusive_ptr.  The
// count starts at zero: the first smart pointer constructed over the
// object performs the initial increment via intrusive_ptr_add_ref.
template <typename Deriv>
struct ref_count_base
{
    mutable std::atomic<int> ref_count{0};

    friend void intrusive_ptr_add_ref(const Deriv* x)
    {
        // Incrementing only keeps the object alive; relaxed ordering
        // suffices because no data is published by an increment.
        x->ref_count.fetch_add(1, std::memory_order_relaxed);
    }

    friend void intrusive_ptr_release(const Deriv* x)
    {
        // Release on the decrement plus an acquire fence before the
        // delete ensures all prior writes to the object are visible
        // to the thread that destroys it.
        if (x->ref_count.fetch_sub(1, std::memory_order_release) == 1) {
            std::atomic_thread_fence(std::memory_order_acquire);
            delete x;
        }
    }
};
} /* namespace detail */
} /* namespace immer */

View file

@ -0,0 +1,223 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <algorithm>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
namespace immer {
namespace detail {
// C++14-compatible void_t (std::void_t is C++17): maps any pack of
// types to void, enabling SFINAE-based expression detection below.
template <typename... Ts>
struct make_void
{
    using type = void;
};
template <typename... Ts>
using void_t = typename make_void<Ts...>::type;

// Detects whether `*t` is a valid expression for a T lvalue.
template <typename T, typename = void>
struct is_dereferenceable : std::false_type
{};
template <typename T>
struct is_dereferenceable<T, void_t<decltype(*(std::declval<T&>()))>>
    : std::true_type
{};
template <typename T>
constexpr bool is_dereferenceable_v = is_dereferenceable<T>::value;

// Detects whether `t == u` is valid and yields exactly bool.
template <typename T, typename U = T, typename = void>
struct is_equality_comparable : std::false_type
{};
template <typename T, typename U>
struct is_equality_comparable<
    T,
    U,
    std::enable_if_t<std::is_same<bool,
                                  decltype(std::declval<T&>() ==
                                           std::declval<U&>())>::value>>
    : std::true_type
{};
template <typename T, typename U = T>
constexpr bool is_equality_comparable_v = is_equality_comparable<T, U>::value;

// Detects whether `t != u` is valid and yields exactly bool.
template <typename T, typename U = T, typename = void>
struct is_inequality_comparable : std::false_type
{};
template <typename T, typename U>
struct is_inequality_comparable<
    T,
    U,
    std::enable_if_t<std::is_same<bool,
                                  decltype(std::declval<T&>() !=
                                           std::declval<U&>())>::value>>
    : std::true_type
{};
template <typename T, typename U = T>
constexpr bool is_inequality_comparable_v =
    is_inequality_comparable<T, U>::value;

// Detects whether `++t` is valid and yields exactly T&.
template <typename T, typename = void>
struct is_preincrementable : std::false_type
{};
template <typename T>
struct is_preincrementable<
    T,
    std::enable_if_t<std::is_same<T&, decltype(++(std::declval<T&>()))>::value>>
    : std::true_type
{};
template <typename T>
constexpr bool is_preincrementable_v = is_preincrementable<T>::value;

// Detects whether `t - u` is a valid expression.
template <typename T, typename U = T, typename = void>
struct is_subtractable : std::false_type
{};
template <typename T, typename U>
struct is_subtractable<
    T,
    U,
    void_t<decltype(std::declval<T&>() - std::declval<U&>())>> : std::true_type
{};
template <typename T, typename U = T>
constexpr bool is_subtractable_v = is_subtractable<T, U>::value;
// Detection of swappability.  The nested namespace pulls std::swap
// into scope so that the unqualified `swap` below finds both ADL
// overloads and the standard one, mirroring how generic code calls it.
namespace swappable {
using std::swap;
template <typename T, typename U, typename = void>
struct with : std::false_type
{};
// Does not account for non-referenceable types
template <typename T, typename U>
struct with<T,
            U,
            void_t<decltype(swap(std::declval<T&>(), std::declval<U&>())),
                   decltype(swap(std::declval<U&>(), std::declval<T&>()))>>
    : std::true_type
{};
} // namespace swappable
template <typename T, typename U>
using is_swappable_with = swappable::with<T, U>;
template <typename T>
using is_swappable = is_swappable_with<T, T>;
template <typename T>
constexpr bool is_swappable_v = is_swappable_with<T&, T&>::value;

// Approximation of the (pre-C++20) Iterator requirements:
// pre-incrementable, dereferenceable, copyable, destructible,
// swappable, and with complete std::iterator_traits.
template <typename T, typename = void>
struct is_iterator : std::false_type
{};
// See http://en.cppreference.com/w/cpp/concept/Iterator
template <typename T>
struct is_iterator<
    T,
    void_t<
        std::enable_if_t<is_preincrementable_v<T> &&
                         is_dereferenceable_v<T>
                         // accounts for non-referenceable types
                         && std::is_copy_constructible<T>::value &&
                         std::is_copy_assignable<T>::value &&
                         std::is_destructible<T>::value && is_swappable_v<T>>,
        typename std::iterator_traits<T>::value_type,
        typename std::iterator_traits<T>::difference_type,
        typename std::iterator_traits<T>::reference,
        typename std::iterator_traits<T>::pointer,
        typename std::iterator_traits<T>::iterator_category>> : std::true_type
{};
template <typename T>
constexpr bool is_iterator_v = is_iterator<T>::value;
template <typename T, typename U, typename = void>
struct compatible_sentinel : std::false_type
{};
template <typename T, typename U>
struct compatible_sentinel<
T,
U,
std::enable_if_t<is_iterator_v<T> && is_equality_comparable_v<T, U> &&
is_inequality_comparable_v<T, U>>> : std::true_type
{};
template <typename T, typename U>
constexpr bool compatible_sentinel_v = compatible_sentinel<T, U>::value;
template <typename T, typename = void>
struct is_forward_iterator : std::false_type
{};
template <typename T>
struct is_forward_iterator<
T,
std::enable_if_t<is_iterator_v<T> &&
std::is_base_of<std::forward_iterator_tag,
typename std::iterator_traits<
T>::iterator_category>::value>>
: std::true_type
{};
template <typename T>
constexpr bool is_forward_iterator_v = is_forward_iterator<T>::value;
// Detects whether `std::distance(t, u)` is well-formed for the given
// iterator/sentinel pair, i.e. whether we can defer to the standard
// library instead of the hand-rolled fallbacks in util.hpp.
template <typename T, typename U, typename = void>
struct std_distance_supports : std::false_type
{};
template <typename T, typename U>
struct std_distance_supports<
    T,
    U,
    void_t<decltype(std::distance(std::declval<T>(), std::declval<U>()))>>
    : std::true_type
{};
template <typename T, typename U>
constexpr bool std_distance_supports_v = std_distance_supports<T, U>::value;
// Detects whether `std::uninitialized_copy(t, u, v)` is well-formed for
// the given source-iterator/sentinel/sink-iterator triple.
template <typename T, typename U, typename V, typename = void>
struct std_uninitialized_copy_supports : std::false_type
{};
template <typename T, typename U, typename V>
struct std_uninitialized_copy_supports<
    T,
    U,
    V,
    void_t<decltype(std::uninitialized_copy(
        std::declval<T>(), std::declval<U>(), std::declval<V>()))>>
    : std::true_type
{};
template <typename T, typename U, typename V>
constexpr bool std_uninitialized_copy_supports_v =
    std_uninitialized_copy_supports<T, U, V>::value;
} // namespace detail
} // namespace immer

258
third_party/immer/immer/detail/util.hpp vendored Normal file
View file

@ -0,0 +1,258 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <cstddef>
#include <memory>
#include <new>
#include <type_traits>
#include <immer/detail/type_traits.hpp>
#if defined(_MSC_VER)
#include <intrin.h> // for __lzcnt*
#endif
namespace immer {
namespace detail {
// Raw storage suitably sized and aligned to hold a `T` that will be
// constructed in place later.
template <typename T>
using aligned_storage_for =
    typename std::aligned_storage<sizeof(T), alignof(T)>::type;
// Casts away constness while preserving the value category of the
// argument (const lvalue -> lvalue reference, const rvalue -> rvalue
// reference), with `T` deduced automatically.
template <typename T>
T& auto_const_cast(const T& x)
{
    return const_cast<T&>(x);
}
template <typename T>
T&& auto_const_cast(const T&& x)
{
    return const_cast<T&&>(std::move(x));
}
// Moves the elements of `[in1, in2)` into the *uninitialized* storage
// starting at `out`, by driving `std::uninitialized_copy` through move
// iterators.  Returns the sink iterator past the last element written.
template <typename Iter1, typename Iter2>
auto uninitialized_move(Iter1 in1, Iter1 in2, Iter2 out)
{
    auto move_first = std::make_move_iterator(in1);
    auto move_last  = std::make_move_iterator(in2);
    return std::uninitialized_copy(move_first, move_last, out);
}
// Runs the destructor of every object in the range `[first, last)`.
// Counterpart of the uninitialized construction helpers above.
template <class T>
void destroy(T* first, T* last)
{
    while (first != last) {
        first->~T();
        ++first;
    }
}
// Runs the destructor of the `n` objects starting at `p`.
template <class T, class Size>
void destroy_n(T* p, Size n)
{
    for (Size i = Size{}; i < n; ++i)
        (p + i)->~T();
}
// Allocates storage from `Heap` and brace-constructs a `T` in it,
// forwarding `args...` to the constructor.  If construction throws, the
// storage is handed back to the heap before the exception propagates.
template <typename Heap, typename T, typename... Args>
T* make(Args&&... args)
{
    auto storage = Heap::allocate(sizeof(T));
    try {
        return new (storage) T{std::forward<Args>(args)...};
    } catch (...) {
        Heap::deallocate(sizeof(T), storage);
        throw;
    }
}
// Tag type returned by `clz_` below when no count-leading-zeros
// intrinsic exists for the argument type.
struct not_supported_t
{};
// Empty tag passed to the callables used with `static_if`.
struct empty_t
{};
// Wrapper that forces an exact, non-deduced type match in overload
// sets.
template <typename T>
struct exact_t
{
    T value;
    exact_t(T v)
        : value{v} {};
};
// Count-leading-zeros dispatcher.  The generic overload tags the type
// as unsupported (callers inspect the return type and fall back to
// `log2_aux`); the intrinsic overloads below win for exact-type
// matches.
template <typename T>
inline constexpr auto clz_(T) -> not_supported_t
{
    IMMER_UNREACHABLE;
    return {};
}
#if defined(_MSC_VER)
// NOTE(review): the MSVC intrinsics are left disabled upstream --
// presumably because __lzcnt* are not usable in constexpr context;
// MSVC therefore always takes the `log2_aux` fallback.
// inline auto clz_(unsigned short x) { return __lzcnt16(x); }
// inline auto clz_(unsigned int x) { return __lzcnt(x); }
// inline auto clz_(unsigned __int64 x) { return __lzcnt64(x); }
#else
inline constexpr auto clz_(unsigned int x) { return __builtin_clz(x); }
inline constexpr auto clz_(unsigned long x) { return __builtin_clzl(x); }
inline constexpr auto clz_(unsigned long long x) { return __builtin_clzll(x); }
#endif
// Portable O(bits) integer base-2 logarithm, used when no intrinsic is
// available.  Both 0 and 1 map to `r` (zero by default).
template <typename T>
inline constexpr T log2_aux(T x, T r = 0)
{
    while (x > 1) {
        x >>= 1;
        ++r;
    }
    return r;
}
/*!
 * Base-2 integer logarithm of `x`, with `log2(0) == 0`.  Uses the
 * `clz_` intrinsic when one exists for `T`, otherwise falls back to
 * the portable `log2_aux`.
 */
template <typename T>
inline constexpr auto log2(T x) -> std::
    enable_if_t<!std::is_same<decltype(clz_(x)), not_supported_t>::value, T>
{
    // The bit width must match the `clz_` overload selected for T
    // (e.g. __builtin_clz counts within 32 bits for unsigned int), so
    // derive it from T itself.  The previous `sizeof(std::size_t)` was
    // wrong for types narrower than std::size_t: log2(1u) on an LP64
    // platform evaluated to 32 instead of 0.
    return x == 0 ? 0 : sizeof(T) * 8 - 1 - clz_(x);
}
template <typename T>
inline constexpr auto log2(T x)
    -> std::enable_if_t<std::is_same<decltype(clz_(x)), not_supported_t>::value,
                        T>
{
    return log2_aux(x);
}
// Compile-time branch selection.  The single-callable form invokes `f`
// only when `b` holds; the two-callable form runs `f1` when `b` holds
// and `f2` otherwise, returning `R`.  The callables take an `empty_t`
// tag so their bodies are instantiated only for the selected branch.
template <bool b, typename F>
auto static_if(F&& f) -> std::enable_if_t<b>
{
    std::forward<F>(f)(empty_t{});
}
template <bool b, typename F>
auto static_if(F&&) -> std::enable_if_t<!b>
{}
template <bool b, typename R = void, typename F1, typename F2>
auto static_if(F1&& f1, F2&&) -> std::enable_if_t<b, R>
{
    return std::forward<F1>(f1)(empty_t{});
}
template <bool b, typename R = void, typename F1, typename F2>
auto static_if(F1&&, F2&& f2) -> std::enable_if_t<!b, R>
{
    return std::forward<F2>(f2)(empty_t{});
}
// Function object that ignores all of its arguments and always returns
// the compile-time constant `value`.
template <typename T, T value>
struct constantly
{
    template <typename... Args>
    T operator()(Args&&...) const
    {
        return value;
    }
};
/*!
 * An alias to `std::distance`
 *
 * Selected whenever `std::distance` accepts the iterator/sentinel
 * pair; the overloads below handle the remaining cases.
 */
template <typename Iterator,
          typename Sentinel,
          std::enable_if_t<detail::std_distance_supports_v<Iterator, Sentinel>,
                           bool> = true>
typename std::iterator_traits<Iterator>::difference_type
distance(Iterator first, Sentinel last)
{
    return std::distance(first, last);
}
/*!
 * Equivalent of the `std::distance` applied to the sentinel-delimited
 * forward range @f$ [first, last) @f$
 *
 * Selected when `std::distance` is not callable, the range is a
 * forward range, and `last - first` is not available either, so the
 * distance has to be counted by walking the range.
 */
template <typename Iterator,
          typename Sentinel,
          std::enable_if_t<
              (!detail::std_distance_supports_v<Iterator, Sentinel>) &&detail::
                      is_forward_iterator_v<Iterator> &&
                  detail::compatible_sentinel_v<Iterator, Sentinel> &&
                  (!detail::is_subtractable_v<Sentinel, Iterator>),
              bool> = true>
typename std::iterator_traits<Iterator>::difference_type
distance(Iterator first, Sentinel last)
{
    // Accumulate directly in the iterator's difference_type instead of
    // std::size_t, avoiding an implicit unsigned -> signed conversion
    // on return.
    auto result = typename std::iterator_traits<Iterator>::difference_type{};
    while (first != last) {
        ++first;
        ++result;
    }
    return result;
}
/*!
 * Equivalent of the `std::distance` applied to the sentinel-delimited
 * random access range @f$ [first, last) @f$
 *
 * Selected when `std::distance` is not callable but the sentinel can
 * be subtracted from the iterator, giving the distance in O(1).
 */
template <typename Iterator,
          typename Sentinel,
          std::enable_if_t<
              (!detail::std_distance_supports_v<Iterator, Sentinel>) &&detail::
                      is_forward_iterator_v<Iterator> &&
                  detail::compatible_sentinel_v<Iterator, Sentinel> &&
                  detail::is_subtractable_v<Sentinel, Iterator>,
              bool> = true>
typename std::iterator_traits<Iterator>::difference_type
distance(Iterator first, Sentinel last)
{
    return last - first;
}
/*!
 * An alias to `std::uninitialized_copy`
 *
 * Selected whenever the standard algorithm accepts the given
 * iterator/sentinel/sink triple.
 */
template <
    typename Iterator,
    typename Sentinel,
    typename SinkIter,
    std::enable_if_t<
        detail::std_uninitialized_copy_supports_v<Iterator, Sentinel, SinkIter>,
        bool> = true>
SinkIter uninitialized_copy(Iterator first, Sentinel last, SinkIter d_first)
{
    return std::uninitialized_copy(first, last, d_first);
}
/*!
 * Equivalent of the `std::uninitialized_copy` applied to the
 * sentinel-delimited forward range @f$ [first, last) @f$
 *
 * The destination range is *uninitialized* storage, so elements must
 * be constructed in place with placement-new (as the standard
 * algorithm does).  The previous implementation assigned through the
 * sink iterator, which invokes the assignment operator on an object
 * that does not exist yet -- undefined behavior for non-trivial types.
 */
template <typename SourceIter,
          typename Sent,
          typename SinkIter,
          std::enable_if_t<
              (!detail::std_uninitialized_copy_supports_v<SourceIter,
                                                          Sent,
                                                          SinkIter>) &&detail::
                      compatible_sentinel_v<SourceIter, Sent> &&
                  detail::is_forward_iterator_v<SinkIter>,
              bool> = true>
SinkIter uninitialized_copy(SourceIter first, Sent last, SinkIter d_first)
{
    using Value = typename std::iterator_traits<SinkIter>::value_type;
    auto current = d_first;
    try {
        while (first != last) {
            ::new (static_cast<void*>(std::addressof(*current)))
                Value(*first);
            ++current;
            ++first;
        }
    } catch (...) {
        // Roll back: destroy whatever was successfully constructed.
        for (; d_first != current; ++d_first) {
            d_first->~Value();
        }
        throw;
    }
    return current;
}
} // namespace detail
} // namespace immer

View file

@ -0,0 +1,498 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/heap_policy.hpp>
#include <immer/refcount/enable_intrusive_ptr.hpp>
#include <immer/refcount/refcount_policy.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <cassert>
#include <limits>
namespace immer {
namespace detail {
namespace dvektor {
// Index of the highest set bit of `x`; zero maps to zero.
constexpr auto fast_log2(std::size_t x)
{
    if (x == 0)
        return std::size_t{0};
    return sizeof(std::size_t) * 8 - 1 -
           static_cast<std::size_t>(__builtin_clzl(x));
}
// Number of slots per tree node: 2^B.
template <int B, typename T = std::size_t>
constexpr T branches = T{1} << B;
// Bit mask selecting the within-node part of an index: 2^B - 1.
template <int B, typename T = std::size_t>
constexpr T mask = branches<B, T> - 1;
// Maximum tree depth needed to address a std::size_t index space.
template <int B, typename T = std::size_t>
constexpr auto
    max_depth = fast_log2(std::numeric_limits<std::size_t>::max()) / B;
template <typename T, int B, typename MP>
struct node;
// Nodes are reference-counted through boost::intrusive_ptr.
template <typename T, int B, typename MP>
using node_ptr = boost::intrusive_ptr<node<T, B, MP>>;
// A leaf holds 2^B consecutive elements.
template <typename T, int B>
using leaf_node = std::array<T, 1 << B>;
// An inner node holds 2^B children.
template <typename T, int B, typename MP>
using inner_node = std::array<node_ptr<T, B, MP>, 1 << B>;
// Tree node: a tagged union holding either a leaf (array of T) or an
// inner node (array of child pointers).  The active union member's
// lifetime is managed manually, driven by the `kind` discriminator.
template <typename T, int B, typename MP>
struct node
    : enable_intrusive_ptr<node<T, B, MP>, typename MP::refcount>
    , enable_optimized_heap_policy<node<T, B, MP>, typename MP::heap>
{
    using leaf_node_t = leaf_node<T, B>;
    using inner_node_t = inner_node<T, B, MP>;
    // Discriminator for the union below; set once at construction.
    enum
    {
        leaf_kind,
        inner_kind
    } kind;
    union data_t
    {
        leaf_node_t leaf;
        inner_node_t inner;
        data_t(leaf_node_t n)
            : leaf(std::move(n))
        {}
        data_t(inner_node_t n)
            : inner(std::move(n))
        {}
        // The union cannot know which member is active; ~node() below
        // destroys the right one based on `kind`.
        ~data_t() {}
    } data;
    ~node()
    {
        switch (kind) {
        case leaf_kind:
            data.leaf.~leaf_node_t();
            break;
        case inner_kind:
            data.inner.~inner_node_t();
            break;
        }
    }
    node(leaf_node<T, B> n)
        : kind{leaf_kind}
        , data{std::move(n)}
    {}
    node(inner_node<T, B, MP> n)
        : kind{inner_kind}
        , data{std::move(n)}
    {}
    // Assert-checked accessors for the active member, in lvalue,
    // const-lvalue and rvalue flavors.
    inner_node_t& inner() &
    {
        assert(kind == inner_kind);
        return data.inner;
    }
    const inner_node_t& inner() const&
    {
        assert(kind == inner_kind);
        return data.inner;
    }
    inner_node_t&& inner() &&
    {
        assert(kind == inner_kind);
        return std::move(data.inner);
    }
    leaf_node_t& leaf() &
    {
        assert(kind == leaf_kind);
        return data.leaf;
    }
    const leaf_node_t& leaf() const&
    {
        assert(kind == leaf_kind);
        return data.leaf;
    }
    leaf_node_t&& leaf() &&
    {
        assert(kind == leaf_kind);
        return std::move(data.leaf);
    }
};
// Heap-allocates a node, forwarding the arguments to its constructor,
// and wraps it in an owning intrusive pointer.
template <typename T, int B, typename MP, typename... Ts>
auto make_node(Ts&&... xs) -> boost::intrusive_ptr<node<T, B, MP>>
{
    using node_t = node<T, B, MP>;
    return boost::intrusive_ptr<node_t>{new node_t(std::forward<Ts>(xs)...)};
}
// Cached access path into the tree (the "display" technique, as used
// by Scala's immutable vectors): `display[0]` is the current leaf,
// `display[i]` its i-th ancestor, up to the root at
// `display[depth - 1]`.  The `xr` parameters below are the XOR of two
// indices; the position of their highest set bit tells at which level
// the two access paths diverge, so only that many display entries need
// to be refreshed.
template <typename T, int B, typename MP>
struct ref
{
    using inner_t = inner_node<T, B, MP>;
    using leaf_t = leaf_node<T, B>;
    using node_t = node<T, B, MP>;
    using node_ptr_t = node_ptr<T, B, MP>;
    unsigned depth;
    std::array<node_ptr_t, max_depth<B>> display;
    template <typename... Ts>
    static auto make_node(Ts&&... xs)
    {
        return dvektor::make_node<T, B, MP>(std::forward<Ts>(xs)...);
    }
    // Read the element at `index`, where `xr` is `index ^ focus` of the
    // owning vector: it bounds how far up the display the walk starts.
    const T& get_elem(std::size_t index, std::size_t xr) const
    {
        auto display_idx = fast_log2(xr) / B;
        auto node = display[display_idx].get();
        auto shift = display_idx * B;
        while (display_idx--) {
            node = node->inner()[(index >> shift) & mask<B>].get();
            shift -= B;
        }
        return node->leaf()[index & mask<B>];
    }
    // Steal the child at `idx` out of `node` (leaving a null slot) and
    // return a fresh copy of it.  Nulling the slot keeps the stolen
    // child from being shared when the parent is itself copied later.
    node_ptr_t null_slot_and_copy_inner(node_ptr_t& node, std::size_t idx)
    {
        auto& n = node->inner();
        auto x = node_ptr_t{};
        x.swap(n[idx]);
        return copy_of_inner(x);
    }
    // Same as above, for a slot that holds a leaf node.
    node_ptr_t null_slot_and_copy_leaf(node_ptr_t& node, std::size_t idx)
    {
        auto& n = node->inner();
        auto x = node_ptr_t{};
        x.swap(n[idx]);
        return copy_of_leaf(x);
    }
    node_ptr_t copy_of_inner(const node_ptr_t& n)
    {
        return make_node(n->inner());
    }
    node_ptr_t copy_of_leaf(const node_ptr_t& n)
    {
        return make_node(n->leaf());
    }
    // Write the current (possibly locally modified) display path back
    // into freshly copied ancestors along `index`, making the display
    // consistent with the tree again ("clean").
    void stabilize(std::size_t index)
    {
        auto shift = B;
        for (auto i = 1u; i < depth; ++i) {
            display[i] = copy_of_inner(display[i]);
            display[i]->inner()[(index >> shift) & mask<B>] = display[i - 1];
            shift += B;
        }
    }
    // Make the path to `index` safe to mutate when the display is
    // clean (no pending writes).
    void goto_pos_writable_from_clean(std::size_t old_index,
                                      std::size_t index,
                                      std::size_t xr)
    {
        assert(depth);
        auto d = depth - 1;
        if (d == 0) {
            display[0] = copy_of_leaf(display[0]);
        } else {
            // NOTE(review): marked unreachable upstream -- presumably a
            // clean display always takes the d == 0 branch here; the
            // code below is kept for reference.  Confirm before relying
            // on it.
            IMMER_UNREACHABLE;
            display[d] = copy_of_inner(display[d]);
            auto shift = B * d;
            while (--d) {
                display[d] = null_slot_and_copy_inner(
                    display[d + 1], (index >> shift) & mask<B>);
                shift -= B;
            }
            display[0] =
                null_slot_and_copy_leaf(display[1], (index >> B) & mask<B>);
        }
    }
    // Make the path to `new_index` safe to mutate when the display is
    // dirty: first write the old path back, then copy down the new one.
    void goto_pos_writable_from_dirty(std::size_t old_index,
                                      std::size_t new_index,
                                      std::size_t xr)
    {
        assert(depth);
        if (xr < (1 << B)) {
            display[0] = copy_of_leaf(display[0]);
        } else {
            auto display_idx = fast_log2(xr) / B;
            auto shift = B;
            for (auto i = 1u; i <= display_idx; ++i) {
                display[i] = copy_of_inner(display[i]);
                display[i]->inner()[(old_index >> shift) & mask<B>] =
                    display[i - 1];
                shift += B;
            }
            for (auto i = display_idx - 1; i > 0; --i) {
                shift -= B;
                display[i] = null_slot_and_copy_inner(
                    display[i + 1], (new_index >> shift) & mask<B>);
            }
            display[0] =
                null_slot_and_copy_leaf(display[1], (new_index >> B) & mask<B>);
        }
    }
    // Point the display at a brand new position past the current end,
    // growing the tree (and `depth`) when the root overflows.
    void goto_fresh_pos_writable_from_clean(std::size_t old_index,
                                            std::size_t new_index,
                                            std::size_t xr)
    {
        auto display_idx = fast_log2(xr) / B;
        if (display_idx > 0) {
            auto shift = display_idx * B;
            if (display_idx == depth) {
                display[display_idx] = make_node(inner_t{});
                display[display_idx]->inner()[(old_index >> shift) & mask<B>] =
                    display[display_idx - 1];
                ++depth;
            }
            while (--display_idx) {
                auto node = display[display_idx + 1]
                                ->inner()[(new_index >> shift) & mask<B>];
                display[display_idx] =
                    node ? std::move(node) : make_node(inner_t{});
            }
            display[0] = make_node(leaf_t{});
        }
    }
    void goto_fresh_pos_writable_from_dirty(std::size_t old_index,
                                            std::size_t new_index,
                                            std::size_t xr)
    {
        stabilize(old_index);
        goto_fresh_pos_writable_from_clean(old_index, new_index, xr);
    }
    // Move the display to the first slot of the next leaf block
    // (read-only traversal helper used by the iterator).
    void goto_next_block_start(std::size_t index, std::size_t xr)
    {
        auto display_idx = fast_log2(xr) / B;
        auto shift = display_idx * B;
        if (display_idx > 0) {
            display[display_idx - 1] =
                display[display_idx]->inner()[(index >> shift) & mask<B>];
            while (--display_idx)
                display[display_idx - 1] = display[display_idx]->inner()[0];
        }
    }
    // Move the display to an arbitrary `index` (read-only).
    void goto_pos(std::size_t index, std::size_t xr)
    {
        auto display_idx = fast_log2(xr) / B;
        auto shift = display_idx * B;
        if (display_idx) {
            do {
                display[display_idx - 1] =
                    display[display_idx]->inner()[(index >> shift) & mask<B>];
                shift -= B;
            } while (--display_idx);
        }
    }
};
// Persistent vector implementation.  `focus` is the index whose leaf
// is currently cached in `p.display`; `dirty` means the cached path
// has local modifications not yet written back into the tree
// (stabilized).  Copies of `impl` share nodes; mutation goes through
// the copy-on-write goto_*_writable helpers.
template <typename T, int B, typename MP>
struct impl
{
    using inner_t = inner_node<T, B, MP>;
    using leaf_t = leaf_node<T, B>;
    using node_t = node<T, B, MP>;
    using node_ptr_t = node_ptr<T, B, MP>;
    using ref_t = ref<T, B, MP>;
    std::size_t size;
    std::size_t focus;
    bool dirty;
    ref_t p;
    template <typename... Ts>
    static auto make_node(Ts&&... xs)
    {
        return dvektor::make_node<T, B, MP>(std::forward<Ts>(xs)...);
    }
    // Move the cached path to `new_index` with copy-on-write, choosing
    // the clean/dirty variant and marking the result dirty.
    void goto_pos_writable(std::size_t old_index,
                           std::size_t new_index,
                           std::size_t xr)
    {
        if (dirty) {
            p.goto_pos_writable_from_dirty(old_index, new_index, xr);
        } else {
            p.goto_pos_writable_from_clean(old_index, new_index, xr);
            dirty = true;
        }
    }
    // Same, but for a position just past the end (may grow the tree).
    void goto_fresh_pos_writable(std::size_t old_index,
                                 std::size_t new_index,
                                 std::size_t xr)
    {
        if (dirty) {
            p.goto_fresh_pos_writable_from_dirty(old_index, new_index, xr);
        } else {
            p.goto_fresh_pos_writable_from_clean(old_index, new_index, xr);
            dirty = true;
        }
    }
    // Returns a new vector with `value` appended; `*this` is untouched.
    impl push_back(T value) const
    {
        if (size) {
            // block_index: first index of the leaf that will hold the
            // new element; lo: offset within that leaf.
            auto block_index = size & ~mask<B>;
            auto lo = size & mask<B>;
            if (size != block_index) {
                // The last leaf still has room.
                auto s = impl{size + 1, block_index, dirty, p};
                s.goto_pos_writable(focus, block_index, focus ^ block_index);
                s.p.display[0]->leaf()[lo] = std::move(value);
                return s;
            } else {
                // The last leaf is full: open a fresh one.
                auto s = impl{size + 1, block_index, dirty, p};
                s.goto_fresh_pos_writable(
                    focus, block_index, focus ^ block_index);
                s.p.display[0]->leaf()[lo] = std::move(value);
                return s;
            }
        } else {
            // First element: a single leaf, depth one, clean.
            return impl{
                1, 0, false, {1, {{make_node(leaf_t{{std::move(value)}})}}}};
        }
    }
    const T& get(std::size_t index) const
    {
        return p.get_elem(index, index ^ focus);
    }
    // Returns a new vector where element `idx` is `fn(old_value)`.
    template <typename FnT>
    impl update(std::size_t idx, FnT&& fn) const
    {
        auto s = impl{size, idx, dirty, p};
        s.goto_pos_writable(focus, idx, focus ^ idx);
        auto& v = s.p.display[0]->leaf()[idx & mask<B>];
        v = fn(std::move(v));
        return s;
    }
    impl assoc(std::size_t idx, T value) const
    {
        return update(idx, [&](auto&&) { return std::move(value); });
    }
};
// The canonical empty vector: size zero, depth one, clean display.
template <typename T, int B, typename MP>
const impl<T, B, MP> empty = {0, 0, false, ref<T, B, MP>{1, {}}};
// Random-access iterator over the vector, built on
// boost::iterator_facade.  It keeps its own display (`p_`) positioned
// at the leaf containing index `i_`; `base_` is the first index of
// that leaf and `curr_` points into the leaf's array.
template <typename T, int B, typename MP>
struct iterator
    : boost::iterator_facade<iterator<T, B, MP>,
                             T,
                             boost::random_access_traversal_tag,
                             const T&>
{
    struct end_t
    {};
    iterator() = default;
    iterator(const impl<T, B, MP>& v)
        : p_{v.p}
        , i_{0}
        , base_{0}
    {
        if (v.dirty)
            p_.stabilize(v.focus);
        p_.goto_pos(0, 0 ^ v.focus);
        curr_ = p_.display[0]->leaf().begin();
    }
    iterator(const impl<T, B, MP>& v, end_t)
        : p_{v.p}
        , i_{v.size}
        , base_{(v.size - 1) & ~mask<B>}
    {
        if (v.dirty)
            p_.stabilize(v.focus);
        p_.goto_pos(base_, base_ ^ v.focus);
        curr_ = p_.display[0]->leaf().begin() + (i_ - base_);
    }

private:
    friend class boost::iterator_core_access;
    using leaf_iterator = typename leaf_node<T, B>::const_iterator;
    ref<T, B, MP> p_;
    std::size_t i_;
    std::size_t base_;
    leaf_iterator curr_;
    void increment()
    {
        ++i_;
        if (i_ - base_ < branches<B>) {
            ++curr_;
        } else {
            auto new_base = base_ + branches<B>;
            p_.goto_next_block_start(new_base, base_ ^ new_base);
            base_ = new_base;
            curr_ = p_.display[0]->leaf().begin();
        }
    }
    void decrement()
    {
        assert(i_ > 0);
        --i_;
        if (i_ >= base_) {
            --curr_;
        } else {
            auto new_base = base_ - branches<B>;
            p_.goto_pos(new_base, base_ ^ new_base);
            base_ = new_base;
            curr_ = std::prev(p_.display[0]->leaf().end());
        }
    }
    void advance(std::ptrdiff_t n)
    {
        i_ += n;
        // Fast path: still inside the current leaf.  This previously
        // tested `i_ <= base_`, which with unsigned arithmetic only
        // holds when i_ == base_, so nearly every advance re-walked the
        // tree via goto_pos; the intended bound (mirroring increment/
        // decrement) is base_ <= i_ < base_ + branches<B>.
        if (i_ >= base_ && i_ - base_ < branches<B>) {
            curr_ += n;
        } else {
            auto new_base = i_ & ~mask<B>;
            p_.goto_pos(new_base, base_ ^ new_base);
            base_ = new_base;
            curr_ = p_.display[0]->leaf().begin() + (i_ - base_);
        }
    }
    bool equal(const iterator& other) const { return i_ == other.i_; }
    std::ptrdiff_t distance_to(const iterator& other) const
    {
        return other.i_ > i_ ? static_cast<std::ptrdiff_t>(other.i_ - i_)
                             : -static_cast<std::ptrdiff_t>(i_ - other.i_);
    }
    const T& dereference() const { return *curr_; }
};
} /* namespace dvektor */
} /* namespace detail */
} /* namespace immer */

View file

@ -0,0 +1,69 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/experimental/detail/dvektor_impl.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
// Experimental persistent vector with a cached access path ("display").
// Thin immutable wrapper over detail::dvektor::impl: every mutating
// operation returns a new dvektor sharing structure with the original.
template <typename T, int B = 5, typename MemoryPolicy = default_memory_policy>
class dvektor
{
    using impl_t = detail::dvektor::impl<T, B, MemoryPolicy>;

public:
    using value_type = T;
    using reference = const T&;
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using const_reference = const T&;
    using iterator = detail::dvektor::iterator<T, B, MemoryPolicy>;
    using const_iterator = iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;
    // Creates an empty vector; does not allocate.
    dvektor() = default;
    iterator begin() const { return {impl_}; }
    iterator end() const { return {impl_, typename iterator::end_t{}}; }
    reverse_iterator rbegin() const { return reverse_iterator{end()}; }
    reverse_iterator rend() const { return reverse_iterator{begin()}; }
    std::size_t size() const { return impl_.size; }
    bool empty() const { return impl_.size == 0; }
    // Unchecked element access; undefined for index >= size().
    reference operator[](size_type index) const { return impl_.get(index); }
    // Returns a new vector with `value` appended.
    dvektor push_back(value_type value) const
    {
        return {impl_.push_back(std::move(value))};
    }
    // Returns a new vector with element `idx` replaced by `value`.
    dvektor assoc(std::size_t idx, value_type value) const
    {
        return {impl_.assoc(idx, std::move(value))};
    }
    // Returns a new vector with element `idx` replaced by `fn(old)`.
    template <typename FnT>
    dvektor update(std::size_t idx, FnT&& fn) const
    {
        return {impl_.update(idx, std::forward<FnT>(fn))};
    }

private:
    dvektor(impl_t impl)
        : impl_(std::move(impl))
    {}
    impl_t impl_ = detail::dvektor::empty<T, B, MemoryPolicy>;
};
} // namespace immer

608
third_party/immer/immer/flex_vector.hpp vendored Normal file
View file

@ -0,0 +1,608 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/rbts/rrbtree.hpp>
#include <immer/detail/rbts/rrbtree_iterator.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
// Forward declaration: persistent vector, convertible to flex_vector
// in O(1).
template <typename T,
          typename MP,
          detail::rbts::bits_t B,
          detail::rbts::bits_t BL>
class vector;
// Forward declaration: mutable transient counterpart of flex_vector.
template <typename T,
          typename MP,
          detail::rbts::bits_t B,
          detail::rbts::bits_t BL>
class flex_vector_transient;
/*!
* Immutable sequential container supporting both random access,
* structural sharing and efficient concatenation and slicing.
*
* @tparam T The type of the values to be stored in the container.
* @tparam MemoryPolicy Memory management policy. See @ref
* memory_policy.
*
* @rst
*
* This container is very similar to `vector`_ but also supports
* :math:`O(log(size))` *concatenation*, *slicing* and *insertion* at
* any point. Its performance characteristics are almost identical
* until one of these operations is performed. After that,
 * performance is degraded by a constant factor that usually oscillates
* in the range :math:`[1, 2)` depending on the operation and the
* amount of flexible operations that have been performed.
*
* .. tip:: A `vector`_ can be converted to a `flex_vector`_ in
* constant time without any allocation. This is so because the
* internal structure of a *vector* is a strict subset of the
* internal structure of a *flexible vector*. You can take
* advantage of this property by creating normal vectors as long as
* the flexible operations are not needed, and convert later in
* your processing pipeline once and if these are needed.
*
* @endrst
*/
template <typename T,
typename MemoryPolicy = default_memory_policy,
detail::rbts::bits_t B = default_bits,
detail::rbts::bits_t BL =
detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
class flex_vector
{
using impl_t = detail::rbts::rrbtree<T, MemoryPolicy, B, BL>;
using move_t =
std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;
public:
static constexpr auto bits = B;
static constexpr auto bits_leaf = BL;
using memory_policy = MemoryPolicy;
using value_type = T;
using reference = const T&;
using size_type = detail::rbts::size_t;
using difference_type = std::ptrdiff_t;
using const_reference = const T&;
using iterator = detail::rbts::rrbtree_iterator<T, MemoryPolicy, B, BL>;
using const_iterator = iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using transient_type = flex_vector_transient<T, MemoryPolicy, B, BL>;
/*!
* Default constructor. It creates a flex_vector of `size() == 0`.
* It does not allocate memory and its complexity is @f$ O(1) @f$.
*/
flex_vector() = default;
/*!
* Constructs a flex_vector containing the elements in `values`.
*/
flex_vector(std::initializer_list<T> values)
: impl_{impl_t::from_initializer_list(values)}
{}
/*!
* Constructs a flex_vector containing the elements in the range
* defined by the input iterator `first` and range sentinel `last`.
*/
template <typename Iter,
typename Sent,
std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
bool> = true>
flex_vector(Iter first, Sent last)
: impl_{impl_t::from_range(first, last)}
{}
/*!
* Constructs a vector containing the element `val` repeated `n`
* times.
*/
flex_vector(size_type n, T v = {})
: impl_{impl_t::from_fill(n, v)}
{}
/*!
* Default constructor. It creates a flex_vector with the same
* contents as `v`. It does not allocate memory and is
* @f$ O(1) @f$.
*/
flex_vector(vector<T, MemoryPolicy, B, BL> v)
: impl_{v.impl_.size,
v.impl_.shift,
v.impl_.root->inc(),
v.impl_.tail->inc()}
{}
/*!
* Returns an iterator pointing at the first element of the
* collection. It does not allocate memory and its complexity is
* @f$ O(1) @f$.
*/
IMMER_NODISCARD iterator begin() const { return {impl_}; }
/*!
* Returns an iterator pointing just after the last element of the
* collection. It does not allocate and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD iterator end() const
{
return {impl_, typename iterator::end_t{}};
}
/*!
* Returns an iterator that traverses the collection backwards,
* pointing at the first element of the reversed collection. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD reverse_iterator rbegin() const
{
return reverse_iterator{end()};
}
/*!
* Returns an iterator that traverses the collection backwards,
* pointing after the last element of the reversed collection. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD reverse_iterator rend() const
{
return reverse_iterator{begin()};
}
/*!
* Returns the number of elements in the container. It does
* not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD size_type size() const { return impl_.size; }
/*!
* Returns `true` if there are no elements in the container. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
/*!
* Access the last element.
*/
IMMER_NODISCARD const T& back() const { return impl_.back(); }
/*!
* Access the first element.
*/
IMMER_NODISCARD const T& front() const { return impl_.front(); }
/*!
* Returns a `const` reference to the element at position `index`.
* It is undefined when @f$ 0 index \geq size() @f$. It does not
* allocate memory and its complexity is *effectively* @f$ O(1)
* @f$.
*/
IMMER_NODISCARD reference operator[](size_type index) const
{
return impl_.get(index);
}
/*!
* Returns a `const` reference to the element at position
* `index`. It throws an `std::out_of_range` exception when @f$
* index \geq size() @f$. It does not allocate memory and its
* complexity is *effectively* @f$ O(1) @f$.
*/
reference at(size_type index) const { return impl_.get_check(index); }
/*!
* Returns whether the vectors are equal.
*/
IMMER_NODISCARD bool operator==(const flex_vector& other) const
{
return impl_.equals(other.impl_);
}
IMMER_NODISCARD bool operator!=(const flex_vector& other) const
{
return !(*this == other);
}
/*!
* Returns a flex_vector with `value` inserted at the end. It may
* allocate memory and its complexity is *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: push-back/start
* :end-before: push-back/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector push_back(value_type value) const&
{
return impl_.push_back(std::move(value));
}
IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
{
return push_back_move(move_t{}, std::move(value));
}
/*!
* Returns a flex_vector with `value` inserted at the frony. It may
* allocate memory and its complexity is @f$ O(log(size)) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: push-front/start
* :end-before: push-front/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector push_front(value_type value) const
{
return flex_vector{}.push_back(value) + *this;
}
/*!
* Returns a flex_vector containing value `value` at position `index`.
* Undefined for `index >= size()`.
* It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: set/start
* :end-before: set/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector set(size_type index, value_type value) const&
{
return impl_.assoc(index, std::move(value));
}
IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
{
return set_move(move_t{}, index, std::move(value));
}
/*!
* Returns a vector containing the result of the expression
* `fn((*this)[idx])` at position `idx`.
* Undefined for `index >= size()`.
* It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: update/start
* :end-before: update/end
*
* @endrst
*/
template <typename FnT>
IMMER_NODISCARD flex_vector update(size_type index, FnT&& fn) const&
{
return impl_.update(index, std::forward<FnT>(fn));
}
template <typename FnT>
IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
{
return update_move(move_t{}, index, std::forward<FnT>(fn));
}
/*!
* Returns a vector containing only the first `min(elems, size())`
* elements. It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: take/start
* :end-before: take/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector take(size_type elems) const&
{
return impl_.take(elems);
}
IMMER_NODISCARD decltype(auto) take(size_type elems) &&
{
return take_move(move_t{}, elems);
}
/*!
* Returns a vector without the first `min(elems, size())`
* elements. It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: drop/start
* :end-before: drop/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector drop(size_type elems) const&
{
return impl_.drop(elems);
}
IMMER_NODISCARD decltype(auto) drop(size_type elems) &&
{
return drop_move(move_t{}, elems);
}
/*!
* Concatenation operator. Returns a flex_vector with the contents
* of `l` followed by those of `r`. It may allocate memory
* and its complexity is @f$ O(log(max(size_r, size_l))) @f$
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: concat/start
* :end-before: concat/end
*
* @endrst
*/
IMMER_NODISCARD friend flex_vector operator+(const flex_vector& l,
const flex_vector& r)
{
return l.impl_.concat(r.impl_);
}
IMMER_NODISCARD friend decltype(auto) operator+(flex_vector&& l,
const flex_vector& r)
{
return concat_move(move_t{}, std::move(l), r);
}
IMMER_NODISCARD friend decltype(auto) operator+(const flex_vector& l,
flex_vector&& r)
{
return concat_move(move_t{}, l, std::move(r));
}
IMMER_NODISCARD friend decltype(auto) operator+(flex_vector&& l,
flex_vector&& r)
{
return concat_move(move_t{}, std::move(l), std::move(r));
}
/*!
* Returns a flex_vector with the `value` inserted at index
* `pos`. It may allocate memory and its complexity is @f$
* O(log(size)) @f$
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: insert/start
* :end-before: insert/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector insert(size_type pos, T value) const&
{
return take(pos).push_back(std::move(value)) + drop(pos);
}
IMMER_NODISCARD decltype(auto) insert(size_type pos, T value) &&
{
using std::move;
auto rs = drop(pos);
return std::move(*this).take(pos).push_back(std::move(value)) +
std::move(rs);
}
IMMER_NODISCARD flex_vector insert(size_type pos, flex_vector value) const&
{
return take(pos) + std::move(value) + drop(pos);
}
IMMER_NODISCARD decltype(auto) insert(size_type pos, flex_vector value) &&
{
using std::move;
auto rs = drop(pos);
return std::move(*this).take(pos) + std::move(value) + std::move(rs);
}
/*!
* Returns a flex_vector without the element at index `pos`. It
* may allocate memory and its complexity is @f$ O(log(size)) @f$
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/flex-vector/flex-vector.cpp
* :language: c++
* :dedent: 8
* :start-after: erase/start
* :end-before: erase/end
*
* @endrst
*/
IMMER_NODISCARD flex_vector erase(size_type pos) const&
{
return take(pos) + drop(pos + 1);
}
IMMER_NODISCARD decltype(auto) erase(size_type pos) &&
{
auto rs = drop(pos + 1);
return std::move(*this).take(pos) + std::move(rs);
}
IMMER_NODISCARD flex_vector erase(size_type pos, size_type lpos) const&
{
return lpos > pos ? take(pos) + drop(lpos) : *this;
}
IMMER_NODISCARD decltype(auto) erase(size_type pos, size_type lpos) &&
{
if (lpos > pos) {
auto rs = drop(lpos);
return std::move(*this).take(pos) + std::move(rs);
} else {
return std::move(*this);
}
}
/*!
* Returns an @a transient form of this container, an
* `immer::flex_vector_transient`.
*/
IMMER_NODISCARD transient_type transient() const&
{
return transient_type{impl_};
}
IMMER_NODISCARD transient_type transient() &&
{
return transient_type{std::move(impl_)};
}
// Semi-private
const impl_t& impl() const { return impl_; }
#if IMMER_DEBUG_PRINT
void debug_print(std::ostream& out = std::cerr) const
{
impl_.debug_print(out);
}
#endif
private:
    friend transient_type;

    // Wraps an existing tree; used when a transient is frozen back
    // into a persistent vector.
    flex_vector(impl_t impl)
        : impl_(std::move(impl))
    {
#if IMMER_DEBUG_PRINT
        // force the compiler to generate debug_print, so we can call
        // it from a debugger
        [](volatile auto) {}(&flex_vector::debug_print);
#endif
    }
// push_back() helper dispatched on whether the memory policy permits
// transient in-place mutation (`std::true_type`): mutates this tree.
flex_vector&& push_back_move(std::true_type, value_type value)
{
    impl_.push_back_mut({}, std::move(value));
    return std::move(*this);
}
// Non-mutating fallback: builds a fresh vector sharing structure.
flex_vector push_back_move(std::false_type, value_type value)
{
    return impl_.push_back(std::move(value));
}
// set() helper; `std::true_type` selects in-place mutation.
flex_vector&& set_move(std::true_type, size_type index, value_type value)
{
    impl_.assoc_mut({}, index, std::move(value));
    return std::move(*this);
}
// Non-mutating fallback producing a new tree.
flex_vector set_move(std::false_type, size_type index, value_type value)
{
    return impl_.assoc(index, std::move(value));
}
// update() helper; `std::true_type` selects in-place mutation.
template <typename Fn>
flex_vector&& update_move(std::true_type, size_type index, Fn&& fn)
{
    impl_.update_mut({}, index, std::forward<Fn>(fn));
    return std::move(*this);
}
// Non-mutating fallback producing a new tree.
template <typename Fn>
flex_vector update_move(std::false_type, size_type index, Fn&& fn)
{
    return impl_.update(index, std::forward<Fn>(fn));
}
// take() helper; `std::true_type` selects in-place truncation.
flex_vector&& take_move(std::true_type, size_type elems)
{
    impl_.take_mut({}, elems);
    return std::move(*this);
}
// Non-mutating fallback producing a new tree.
flex_vector take_move(std::false_type, size_type elems)
{
    return impl_.take(elems);
}
// drop() helper; `std::true_type` selects in-place removal of the
// leading elements.
flex_vector&& drop_move(std::true_type, size_type elems)
{
    impl_.drop_mut({}, elems);
    return std::move(*this);
}
// Non-mutating fallback producing a new tree.
flex_vector drop_move(std::false_type, size_type elems)
{
    return impl_.drop(elems);
}
// operator+ helpers: when mutation is allowed (`std::true_type`) the
// rvalue side(s) are concatenated in place; the overload set covers
// every lvalue/rvalue combination of the two operands.
static flex_vector&&
concat_move(std::true_type, flex_vector&& l, const flex_vector& r)
{
    concat_mut_l(l.impl_, {}, r.impl_);
    return std::move(l);
}
static flex_vector&&
concat_move(std::true_type, const flex_vector& l, flex_vector&& r)
{
    concat_mut_r(l.impl_, r.impl_, {});
    return std::move(r);
}
static flex_vector&&
concat_move(std::true_type, flex_vector&& l, flex_vector&& r)
{
    concat_mut_lr_l(l.impl_, {}, r.impl_, {});
    return std::move(l);
}
// Non-mutating fallback: builds a fresh tree sharing structure.
static flex_vector
concat_move(std::false_type, const flex_vector& l, const flex_vector& r)
{
    return l.impl_.concat(r.impl_);
}

impl_t impl_ = impl_t::empty();
};
} // namespace immer

View file

@ -0,0 +1,251 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/rbts/rrbtree.hpp>
#include <immer/detail/rbts/rrbtree_iterator.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
template <typename T,
typename MemoryPolicy,
detail::rbts::bits_t B,
detail::rbts::bits_t BL>
class flex_vector;
template <typename T,
typename MemoryPolicy,
detail::rbts::bits_t B,
detail::rbts::bits_t BL>
class vector_transient;
/*!
* Mutable version of `immer::flex_vector`.
*
* @rst
*
* Refer to :doc:`transients` to learn more about when and how to use
* the mutable versions of immutable containers.
*
* @endrst
*/
template <typename T,
          typename MemoryPolicy  = default_memory_policy,
          detail::rbts::bits_t B = default_bits,
          detail::rbts::bits_t BL =
              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
class flex_vector_transient : MemoryPolicy::transience_t::owner
{
    // Underlying relaxed-radix-balanced tree, shared with flex_vector.
    using impl_t = detail::rbts::rrbtree<T, MemoryPolicy, B, BL>;
    // The transience "owner" base is the token that entitles this
    // object to mutate tree nodes in place.
    using base_t  = typename MemoryPolicy::transience_t::owner;
    using owner_t = typename MemoryPolicy::transience_t::owner;

public:
    static constexpr auto bits      = B;
    static constexpr auto bits_leaf = BL;

    using memory_policy   = MemoryPolicy;
    using value_type      = T;
    using reference       = const T&;
    using size_type       = detail::rbts::size_t;
    using difference_type = std::ptrdiff_t;
    using const_reference = const T&;

    using iterator = detail::rbts::rrbtree_iterator<T, MemoryPolicy, B, BL>;
    using const_iterator   = iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;

    using persistent_type = flex_vector<T, MemoryPolicy, B, BL>;

    /*!
     * Default constructor. It creates a flex_vector of `size() == 0`. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    flex_vector_transient() = default;

    /*!
     * Converting constructor. It creates a transient flex_vector
     * with the same contents as the (strict) vector transient `v`.
     * It does not allocate memory and is @f$ O(1) @f$.
     */
    flex_vector_transient(vector_transient<T, MemoryPolicy, B, BL> v)
        : base_t{std::move(static_cast<base_t&>(v))}
        , impl_{v.impl_.size,
                v.impl_.shift,
                v.impl_.root->inc(),
                v.impl_.tail->inc()}
    {}

    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return {impl_}; }

    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const
    {
        return {impl_, typename iterator::end_t{}};
    }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing at the first element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rbegin() const
    {
        return reverse_iterator{end()};
    }

    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing after the last element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rend() const
    {
        return reverse_iterator{begin()};
    }

    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type size() const { return impl_.size; }

    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }

    /*!
     * Returns a `const` reference to the element at position `index`.
     * It is undefined when @f$ index \geq size() @f$. It does not
     * allocate memory and its complexity is *effectively* @f$ O(1)
     * @f$.
     */
    reference operator[](size_type index) const { return impl_.get(index); }

    /*!
     * Returns a `const` reference to the element at position
     * `index`. It throws an `std::out_of_range` exception when @f$
     * index \geq size() @f$. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    reference at(size_type index) const { return impl_.get_check(index); }

    /*!
     * Inserts `value` at the end. It may allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    void push_back(value_type value)
    {
        impl_.push_back_mut(*this, std::move(value));
    }

    /*!
     * Sets to the value `value` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void set(size_type index, value_type value)
    {
        impl_.assoc_mut(*this, index, std::move(value));
    }

    /*!
     * Updates the vector to contain the result of the expression
     * `fn((*this)[idx])` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    template <typename FnT>
    void update(size_type index, FnT&& fn)
    {
        impl_.update_mut(*this, index, std::forward<FnT>(fn));
    }

    /*!
     * Resizes the vector to only contain the first `min(elems, size())`
     * elements. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void take(size_type elems) { impl_.take_mut(*this, elems); }

    /*!
     * Removes the first `min(elems, size())`
     * elements. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void drop(size_type elems) { impl_.drop_mut(*this, elems); }

    /*!
     * Appends the contents of the `r` at the end. It may allocate
     * memory and its complexity is:
     * @f$ O(log(max(size_r, size_l))) @f$
     */
    void append(flex_vector_transient& r)
    {
        // Give `r` a fresh owner token: the nodes it held are now
        // shared with this vector and must not be mutated through it.
        r.owner_t::operator=(owner_t{});
        concat_mut_l(impl_, *this, r.impl_);
    }
    // Rvalue overload: `r` is expiring, so its owner token can be
    // consumed by the concatenation directly.
    void append(flex_vector_transient&& r)
    {
        concat_mut_lr_l(impl_, *this, r.impl_, r);
    }

    /*!
     * Prepends the contents of the `l` at the beginning. It may
     * allocate memory and its complexity is:
     * @f$ O(log(max(size_r, size_l))) @f$
     */
    void prepend(flex_vector_transient& l)
    {
        // Reset `l`'s owner token before sharing its nodes; see
        // append() above.
        l.owner_t::operator=(owner_t{});
        concat_mut_r(l.impl_, impl_, *this);
    }
    // Rvalue overload; `l`'s owner token is consumed directly.
    void prepend(flex_vector_transient&& l)
    {
        concat_mut_lr_r(l.impl_, l, impl_, *this);
    }

    /*!
     * Returns an @a immutable form of this container, an
     * `immer::flex_vector`.
     */
    IMMER_NODISCARD persistent_type persistent() &
    {
        // Reset our owner token: the nodes become shared with the
        // returned persistent vector and may no longer be mutated.
        this->owner_t::operator=(owner_t{});
        return persistent_type{impl_};
    }
    IMMER_NODISCARD persistent_type persistent() &&
    {
        return persistent_type{std::move(impl_)};
    }

private:
    friend persistent_type;

    // Wraps an existing tree; used by flex_vector::transient().
    flex_vector_transient(impl_t impl)
        : impl_(std::move(impl))
    {}

    impl_t impl_ = impl_t::empty();
};
} // namespace immer

View file

@ -0,0 +1,41 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <memory>
namespace immer {
/*!
 * A heap that obtains memory from the global `operator new` and
 * returns it with the global `operator delete`.
 */
struct cpp_heap
{
    /*!
     * Allocates a region of `size` bytes.  On failure, `::operator
     * new` throws (`std::bad_alloc` by default).  Extra tag
     * arguments are accepted and ignored.
     */
    template <typename... Tags>
    static void* allocate(std::size_t size, Tags...)
    {
        void* region = ::operator new(size);
        return region;
    }

    /*!
     * Releases a region `data` previously obtained from
     * `allocate`.  A region must not be accessed or released again
     * once it has been deallocated.
     */
    static void deallocate(std::size_t size, void* data)
    {
        ::operator delete(data);
    }
};
} // namespace immer

View file

@ -0,0 +1,69 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <immer/heap/identity_heap.hpp>
#include <cassert>
#include <cstddef>
#include <type_traits>
#include <memory>
namespace immer {
#if IMMER_ENABLE_DEBUG_SIZE_HEAP
/*!
 * A heap adaptor that, in debug builds, verifies that the size passed
 * to `deallocate()` matches the size originally requested from
 * `allocate()`.  The requested size is stored in a small header
 * placed just before the region handed back to the caller.
 */
template <typename Base>
struct debug_size_heap
{
#if defined(__MINGW32__) && !defined(__MINGW64__)
    // There is a bug in MinGW 32bit:
    // https://sourceforge.net/p/mingw-w64/bugs/778/ It causes different
    // versions of std::max_align_t to be defined, depending on inclusion order
    // of stddef.h and stdint.h. As we have no control over the inclusion order
    // here (as it might be set in stone by the outside world), we can't easily
    // pin it to one of both versions of std::max_align_t. This means, we have
    // to hardcode extra_size for MinGW 32bit builds until the mentioned bug is
    // fixed.
    constexpr static auto extra_size = 8;
#else
    // Header size, padded to the maximum fundamental alignment so the
    // payload that follows it stays suitably aligned.
    constexpr static auto extra_size = sizeof(
        std::aligned_storage_t<sizeof(std::size_t), alignof(std::max_align_t)>);
#endif

    template <typename... Tags>
    static void* allocate(std::size_t size, Tags... tags)
    {
        // Allocate room for the header plus the payload, record the
        // requested size in the header, and return the payload.
        auto header = (std::size_t*) Base::allocate(size + extra_size, tags...);
        new (header) std::size_t{size};
        return ((char*) header) + extra_size;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags... tags)
    {
        // Step back to the header and check the recorded size.
        auto header = (std::size_t*) (((char*) data) - extra_size);
        assert(*header == size);
        Base::deallocate(size + extra_size, header, tags...);
    }
};
#else // IMMER_ENABLE_DEBUG_SIZE_HEAP
template <typename Base>
using debug_size_heap = identity_heap<Base>;
#endif // !IMMER_ENABLE_DEBUG_SIZE_HEAP
} // namespace immer

View file

@ -0,0 +1,83 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/free_list_node.hpp>
#include <immer/heap/with_data.hpp>
#include <atomic>
#include <cassert>
namespace immer {
/*!
 * Adaptor that does not release the memory to the parent heap but
 * instead it keeps the memory in a thread-safe global free list. Must
 * be preceded by a `with_data<free_list_node, ...>` heap adaptor.
 *
 * @tparam Size Maximum size of the objects to be allocated.
 * @tparam Limit Maximum number of nodes to keep in the free list.
 * @tparam Base Type of the parent heap.
 */
template <std::size_t Size, std::size_t Limit, typename Base>
struct free_list_heap : Base
{
    using base_t = Base;

    template <typename... Tags>
    static void* allocate(std::size_t size, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

        // Lock-free pop from the free list (Treiber-stack style).
        // NOTE(review): reading `n->next` while other threads may pop
        // and reuse `n` concurrently is the classic ABA hazard of such
        // free lists — confirm this is acceptable for this workload.
        free_list_node* n;
        do {
            n = head().data;
            if (!n) {
                // Free list is empty: fall back to the parent heap,
                // always allocating the fixed maximum size.
                auto p = base_t::allocate(Size + sizeof(free_list_node));
                return static_cast<free_list_node*>(p);
            }
        } while (!head().data.compare_exchange_weak(n, n->next));
        head().count.fetch_sub(1u, std::memory_order_relaxed);
        return n;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

        // we use relaxed, because we are fine with temporarily having
        // a few more/less buffers in free list
        if (head().count.load(std::memory_order_relaxed) >= Limit) {
            base_t::deallocate(Size + sizeof(free_list_node), data);
        } else {
            // Lock-free push of the node onto the free list.
            auto n = static_cast<free_list_node*>(data);
            do {
                n->next = head().data;
            } while (!head().data.compare_exchange_weak(n->next, n));
            head().count.fetch_add(1u, std::memory_order_relaxed);
        }
    }

private:
    // Process-wide free-list head: top node and an approximate count
    // used to honor `Limit`.
    struct head_t
    {
        std::atomic<free_list_node*> data;
        std::atomic<std::size_t> count;
    };

    static head_t& head()
    {
        static head_t head_{{nullptr}, {0}};
        return head_;
    }
};
} // namespace immer

View file

@ -0,0 +1,24 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/with_data.hpp>
namespace immer {
// Intrusive link stored in front of every region managed by the
// free-list heaps; threads freed regions into a singly linked list.
struct free_list_node
{
    free_list_node* next;
};

// Convenience adaptor: reserves room for a `free_list_node` header in
// front of every allocation made through `Base`.
template <typename Base>
struct with_free_list_node : with_data<free_list_node, Base>
{};
} // namespace immer

127
third_party/immer/immer/heap/gc_heap.hpp vendored Normal file
View file

@ -0,0 +1,127 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <immer/heap/tags.hpp>
#if IMMER_HAS_LIBGC
#include <gc/gc.h>
#else
#error "Using garbage collection requires libgc"
#endif
#include <cstdlib>
#include <memory>
namespace immer {
#ifdef __APPLE__
#define IMMER_GC_REQUIRE_INIT 1
#else
#define IMMER_GC_REQUIRE_INIT 0
#endif
#if IMMER_GC_REQUIRE_INIT
namespace detail {

// Ensures `GC_init()` runs before any GC allocation on platforms that
// require explicit initialization.  The static member `init` is
// constructed during dynamic initialization of every translation unit
// that instantiates the template.
template <int Dummy = 0>
struct gc_initializer
{
    gc_initializer() { GC_init(); }
    static gc_initializer init;
};

template <int D>
gc_initializer<D> gc_initializer<D>::init{};

// Touching the static from a function-local static guarantees the
// initializer is ODR-used (and thus instantiated) before first use.
inline void gc_initializer_guard()
{
    static gc_initializer<> init_ = gc_initializer<>::init;
    (void) init_;
}

} // namespace detail
#define IMMER_GC_INIT_GUARD_ ::immer::detail::gc_initializer_guard()
#else
#define IMMER_GC_INIT_GUARD_
#endif // IMMER_GC_REQUIRE_INIT
/*!
* Heap that uses a tracing garbage collector.
*
* @rst
*
* This heap uses the `Boehm's conservative garbage collector`_ under
* the hood. This is a tracing garbage collector that automatically
* reclaims unused memory. Thus, it is not needed to call
* ``deallocate()`` in order to release memory.
*
* .. admonition:: Dependencies
* :class: tip
*
* In order to use this header file, you need to make sure that
* Boehm's ``libgc`` is your include path and link to its binary
* library.
*
* .. caution:: Memory that is allocated with the standard ``malloc``
* and ``free`` is not visible to ``libgc`` when it is looking for
* references. This means that if, let's say, you store a
* :cpp:class:`immer::vector` using a ``gc_heap`` inside a
* ``std::vector`` that uses a standard allocator, the memory of
* the former might be released automatically at unexpected times
* causing crashes.
*
* .. caution:: When using a ``gc_heap`` in combination with immutable
* containers, the destructors of the contained objects will never
* be called. It is ok to store containers inside containers as
* long as all of them use a ``gc_heap`` consistently, but storing
* other kinds of objects with relevant destructors
* (e.g. containers with reference counting or other kinds of
* *resource handles*) might cause memory leaks and other problems.
*
* .. _boehm's conservative garbage collector: https://github.com/ivmai/bdwgc
*
* @endrst
*/
class gc_heap
{
public:
    // Allocates `n` bytes of collectable memory that the collector
    // scans for references.  Throws `std::bad_alloc` on failure.
    static void* allocate(std::size_t n)
    {
        IMMER_GC_INIT_GUARD_;
        auto p = GC_malloc(n);
        if (IMMER_UNLIKELY(!p))
            throw std::bad_alloc{};
        return p;
    }

    // `norefs_tag` overload: the region is promised to contain no
    // pointers, so the collector does not scan it (GC_malloc_atomic).
    static void* allocate(std::size_t n, norefs_tag)
    {
        IMMER_GC_INIT_GUARD_;
        auto p = GC_malloc_atomic(n);
        if (IMMER_UNLIKELY(!p))
            throw std::bad_alloc{};
        return p;
    }

    // Explicit deallocation is optional with a tracing collector but
    // returns the memory eagerly.
    static void deallocate(std::size_t, void* data) { GC_free(data); }

    static void deallocate(std::size_t, void* data, norefs_tag)
    {
        GC_free(data);
    }
};
} // namespace immer

View file

@ -0,0 +1,141 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <immer/heap/debug_size_heap.hpp>
#include <immer/heap/free_list_heap.hpp>
#include <immer/heap/split_heap.hpp>
#include <immer/heap/thread_local_free_list_heap.hpp>
#include <algorithm>
#include <cstdlib>
namespace immer {
/*!
 * Heap policy that unconditionally uses its `Heap` argument.
 */
template <typename Heap>
struct heap_policy
{
    // Heap used for allocations of arbitrary size.
    using type = Heap;

    // Same heap regardless of the (compile-time) object size.
    template <std::size_t>
    struct optimized
    {
        using type = Heap;
    };
};
/*!
 * Mixin base: classes deriving from it (passing themselves as
 * `Deriv`) get class-level `operator new`/`operator delete` that
 * allocate from the heap `HeapPolicy` deems optimal for objects of
 * `sizeof(Deriv)` bytes.
 */
template <typename Deriv, typename HeapPolicy>
struct enable_optimized_heap_policy
{
    static void* operator new(std::size_t size)
    {
        using opt_heap_t =
            typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
        return opt_heap_t::allocate(size);
    }

    static void operator delete(void* data, std::size_t size)
    {
        using opt_heap_t =
            typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
        opt_heap_t::deallocate(size, data);
    }
};
/*!
* Heap policy that returns a heap with a free list of objects
* of `max_size = max(Sizes...)` on top an underlying `Heap`. Note
* these two properties of the resulting heap:
*
* - Allocating an object that is bigger than `max_size` may trigger
* *undefined behavior*.
*
* - Allocating an object of size less than `max_size` still
* returns an object of `max_size`.
*
* Basically, this heap will always return objects of `max_size`.
* When an object is freed, it does not directly invoke `std::free`,
* but it keeps the object in a global linked list instead. When a
* new object is requested, it does not need to call `std::malloc` but
* it can directly pop and return the other object from the global
* list, a much faster operation.
*
* This actually creates a hierarchy with two free lists:
*
* - A `thread_local` free list is used first. It does not need any
* kind of synchronization and is very fast. When the thread
* finishes, its contents are returned to the next free list.
*
* - A global free list using lock-free access via atomics.
*
* @tparam Heap Heap to be used when the free list is empty.
*
* @rst
*
* .. tip:: For many applications that use immutable data structures
* significantly, this is actually the best heap policy, and it
* might become the default in the future.
*
* Note that most our data structures internally use trees with the
* same big branching factors. This means that all *vectors*,
* *maps*, etc. can just allocate elements from the same free-list
* optimized heap. Not only does this lowers the allocation time,
* but also makes up for more efficient *cache utilization*. When
* a new node is needed, there are high chances the allocator will
* return a node that was just accessed. When batches of immutable
* updates are made, this can make a significant difference.
*
* @endrst
*/
template <typename Heap, std::size_t Limit = default_free_list_size>
struct free_list_heap_policy
{
    // Generic (size-agnostic) allocations go straight to the checked
    // base heap.
    using type = debug_size_heap<Heap>;

    // Fixed-size allocations are served by a thread-local free list
    // that spills into a lock-free global free list, both sitting on
    // top of the checked base heap; sizes above `Size` bypass the
    // free lists via `split_heap`.
    template <std::size_t Size>
    struct optimized
    {
        using type =
            split_heap<Size,
                       with_free_list_node<thread_local_free_list_heap<
                           Size,
                           Limit,
                           free_list_heap<Size, Limit, debug_size_heap<Heap>>>>,
                       debug_size_heap<Heap>>;
    };
};
/*!
* Similar to @ref free_list_heap_policy, but it assumes no
* multi-threading, so a single global free list with no concurrency
* checks is used.
*/
template <typename Heap, std::size_t Limit = default_free_list_size>
struct unsafe_free_list_heap_policy
{
    using type = Heap;

    // Single global free list without any concurrency checks; only
    // valid when the program uses the containers from one thread.
    template <std::size_t Size>
    struct optimized
    {
        using type = split_heap<
            Size,
            with_free_list_node<
                unsafe_free_list_heap<Size, Limit, debug_size_heap<Heap>>>,
            debug_size_heap<Heap>>;
    };
};
} // namespace immer

View file

@ -0,0 +1,34 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cstdlib>
namespace immer {
/*!
 * A transparent heap adaptor: every operation is forwarded to the
 * parent heap `Base` unchanged.
 */
template <typename Base>
struct identity_heap : Base
{
    template <typename... Tags>
    static void* allocate(std::size_t size, Tags... tags)
    {
        void* region = Base::allocate(size, tags...);
        return region;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags... tags)
    {
        Base::deallocate(size, data, tags...);
    }
};
} // namespace immer

View file

@ -0,0 +1,44 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/config.hpp>
#include <cstdlib>
#include <memory>
namespace immer {
/*!
 * A heap that manages memory through `std::malloc` and `std::free`.
 */
struct malloc_heap
{
    /*!
     * Allocates a region of `size` bytes with `std::malloc`; throws
     * `std::bad_alloc` when the allocation fails.  Extra tag
     * arguments are accepted and ignored.
     */
    template <typename... Tags>
    static void* allocate(std::size_t size, Tags...)
    {
        auto region = std::malloc(size);
        if (IMMER_UNLIKELY(!region))
            throw std::bad_alloc{};
        return region;
    }

    /*!
     * Releases a region `data` previously obtained from `allocate`.
     * A region must not be accessed or released again once it has
     * been deallocated.
     */
    static void deallocate(std::size_t, void* data) { std::free(data); }
};
} // namespace immer

View file

@ -0,0 +1,40 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <atomic>
#include <cassert>
namespace immer {
/*!
 * Adaptor that routes each request by size: allocations of at most
 * `Size` bytes go to `SmallHeap`, larger ones to `BigHeap`.
 */
template <std::size_t Size, typename SmallHeap, typename BigHeap>
struct split_heap
{
    template <typename... Tags>
    static void* allocate(std::size_t size, Tags... tags)
    {
        if (size <= Size)
            return SmallHeap::allocate(size, tags...);
        return BigHeap::allocate(size, tags...);
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags... tags)
    {
        // The same size must be passed here as at allocation time so
        // the request reaches the heap that produced the region.
        if (size <= Size) {
            SmallHeap::deallocate(size, data, tags...);
        } else {
            BigHeap::deallocate(size, data, tags...);
        }
    }
};
} // namespace immer

16
third_party/immer/immer/heap/tags.hpp vendored Normal file
View file

@ -0,0 +1,16 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
namespace immer {
// Tag type: marks an allocation as containing no pointers, letting
// heaps that trace references (e.g. gc_heap) skip scanning it.
struct norefs_tag
{};
} // namespace immer

View file

@ -0,0 +1,55 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/unsafe_free_list_heap.hpp>
namespace immer {
namespace detail {

// Per-thread free-list storage.  The head lives in a `thread_local`
// static; its destructor runs at thread exit and returns all cached
// nodes to the parent heap via `Heap::clear()`.
template <typename Heap>
struct thread_local_free_list_storage
{
    struct head_t
    {
        free_list_node* data;
        std::size_t count;

        ~head_t() { Heap::clear(); }
    };

    static head_t& head()
    {
        thread_local static head_t head_{nullptr, 0};
        return head_;
    }
};

} // namespace detail
/*!
 * Adaptor that does not release the memory to the parent heap but
 * instead it keeps the memory in a `thread_local` global free
 * list. Must be preceded by a `with_data<free_list_node, ...>` heap
 * adaptor. When the current thread finishes, the memory is returned
 * to the parent heap.
 *
 * @tparam Size Maximum size of the objects to be allocated.
 * @tparam Limit Maximum number of elements to keep in the free list.
 * @tparam Base Type of the parent heap.
 */
template <std::size_t Size, std::size_t Limit, typename Base>
struct thread_local_free_list_heap
    : detail::unsafe_free_list_heap_impl<detail::thread_local_free_list_storage,
                                         Size,
                                         Limit,
                                         Base>
{};
} // namespace immer

View file

@ -0,0 +1,109 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cassert>
#include <immer/config.hpp>
#include <immer/heap/free_list_node.hpp>
namespace immer {
namespace detail {
// Process-wide free-list storage with no synchronization: a plain
// head pointer and node count in a function-local static.
template <typename Heap>
struct unsafe_free_list_storage
{
    struct head_t
    {
        free_list_node* data;
        std::size_t count;
    };

    static head_t& head()
    {
        static head_t head_{nullptr, 0};
        return head_;
    }
};
// Common implementation of the non-thread-safe free-list heaps.  The
// `Storage` template parameter decides where the list head lives
// (global vs. thread_local).  All allocations are served at the fixed
// size `Size + sizeof(free_list_node)`.
template <template <class> class Storage,
          std::size_t Size,
          std::size_t Limit,
          typename Base>
class unsafe_free_list_heap_impl : Base
{
    using storage = Storage<unsafe_free_list_heap_impl>;

public:
    using base_t = Base;

    template <typename... Tags>
    static void* allocate(std::size_t size, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

        // Pop a cached node if available; otherwise fall back to the
        // parent heap with the fixed maximum size.
        auto n = storage::head().data;
        if (!n) {
            auto p = base_t::allocate(Size + sizeof(free_list_node));
            return static_cast<free_list_node*>(p);
        }
        --storage::head().count;
        storage::head().data = n->next;
        return n;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* data, Tags...)
    {
        assert(size <= sizeof(free_list_node) + Size);
        assert(size >= sizeof(free_list_node));

        // Keep the node for reuse unless the list is already at its
        // configured limit.
        if (storage::head().count >= Limit)
            base_t::deallocate(Size + sizeof(free_list_node), data);
        else {
            auto n  = static_cast<free_list_node*>(data);
            n->next = storage::head().data;
            storage::head().data = n;
            ++storage::head().count;
        }
    }

    // Returns every cached node to the parent heap; called e.g. from
    // the thread-local storage destructor at thread exit.
    static void clear()
    {
        while (storage::head().data) {
            auto n = storage::head().data->next;
            base_t::deallocate(Size + sizeof(free_list_node),
                               storage::head().data);
            storage::head().data = n;
            --storage::head().count;
        }
    }
};
} // namespace detail
/*!
 * Adaptor that does not release the memory to the parent heap but
 * instead it keeps the memory in a global free list that **is not
 * thread-safe**. Must be preceded by a `with_data<free_list_node,
 * ...>` heap adaptor.
 *
 * @tparam Size Maximum size of the objects to be allocated.
 * @tparam Limit Maximum number of elements to keep in the free list.
 * @tparam Base Type of the parent heap.
 */
template <std::size_t Size, std::size_t Limit, typename Base>
struct unsafe_free_list_heap
    : detail::unsafe_free_list_heap_impl<detail::unsafe_free_list_storage,
                                         Size,
                                         Limit,
                                         Base>
{};
} // namespace immer

View file

@ -0,0 +1,43 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <cstdio>
namespace immer {
/*!
 * Heap adaptor that places a default-constructed object of type `T`
 * immediately *before* the region handed back to the caller; the
 * object is destroyed again on deallocation.
 *
 * NOTE(review): the payload returned to the caller is offset by
 * `sizeof(T)` from the base allocation — this assumes that offset
 * preserves the alignment the payload needs; confirm for over-aligned
 * `T` or payload types.
 *
 * @tparam T Type of the prepended data.
 * @tparam Base Type of the parent heap.
 */
template <typename T, typename Base>
struct with_data : Base
{
    using base_t = Base;

    template <typename... Tags>
    static void* allocate(std::size_t size, Tags... tags)
    {
        // Allocate header + payload, construct the header in place,
        // and hand the caller the address just past it.
        auto raw    = base_t::allocate(size + sizeof(T), tags...);
        auto header = new (raw) T{};
        return header + 1;
    }

    template <typename... Tags>
    static void deallocate(std::size_t size, void* p, Tags... tags)
    {
        // Step back to the header, destroy it, and release the whole
        // region to the parent heap.
        auto header = static_cast<T*>(p) - 1;
        header->~T();
        base_t::deallocate(size + sizeof(T), header, tags...);
    }
};
} // namespace immer

342
third_party/immer/immer/map.hpp vendored Normal file
View file

@ -0,0 +1,342 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/hamts/champ.hpp>
#include <immer/detail/hamts/champ_iterator.hpp>
#include <immer/memory_policy.hpp>
#include <functional>
namespace immer {
template <typename K,
typename T,
typename Hash,
typename Equal,
typename MemoryPolicy,
detail::hamts::bits_t B>
class map_transient;
/*!
* Immutable unordered mapping of values from type `K` to type `T`.
*
* @tparam K The type of the keys.
* @tparam T The type of the values to be stored in the container.
* @tparam Hash The type of a function object capable of hashing
* values of type `T`.
* @tparam Equal The type of a function object capable of comparing
* values of type `T`.
* @tparam MemoryPolicy Memory management policy. See @ref
* memory_policy.
*
* @rst
*
* This cotainer provides a good trade-off between cache locality,
* search, update performance and structural sharing. It does so by
* storing the data in contiguous chunks of :math:`2^{B}` elements.
* When storing big objects, the size of these contiguous chunks can
* become too big, damaging performance. If this is measured to be
* problematic for a specific use-case, it can be solved by using a
* `immer::box` to wrap the type `T`.
*
* **Example**
* .. literalinclude:: ../example/map/intro.cpp
* :language: c++
* :start-after: intro/start
* :end-before: intro/end
*
* @endrst
*
*/
template <typename K,
          typename T,
          typename Hash           = std::hash<K>,
          typename Equal          = std::equal_to<K>,
          typename MemoryPolicy   = default_memory_policy,
          detail::hamts::bits_t B = default_bits>
class map
{
    using value_t = std::pair<K, T>;

    // Extracts the mapped value out of a stored (key, value) pair.
    struct project_value
    {
        const T& operator()(const value_t& v) const noexcept
        {
            return v.second;
        }
    };

    // Extracts a pointer to the mapped value out of a stored pair.
    struct project_value_ptr
    {
        const T* operator()(const value_t& v) const noexcept
        {
            return &v.second;
        }
    };

    // Builds a stored (key, value) pair from its two components.
    struct combine_value
    {
        template <typename Kf, typename Tf>
        value_t operator()(Kf&& k, Tf&& v) const
        {
            return {std::forward<Kf>(k), std::forward<Tf>(v)};
        }
    };

    // Fallback used by `operator[]`: yields a reference to a shared,
    // default constructed value when the key is missing.
    struct default_value
    {
        const T& operator()() const
        {
            static T v{};
            return v;
        }
    };

    // Fallback used by `at()`: signals a missing key by throwing.
    struct error_value
    {
        const T& operator()() const
        {
            throw std::out_of_range{"key not found"};
        }
    };

    // Hashes stored pairs (and bare keys) by the key only.
    struct hash_key
    {
        auto operator()(const value_t& v) { return Hash{}(v.first); }
        auto operator()(const K& v) { return Hash{}(v); }
    };

    // Compares stored pairs (and bare keys) by the key only.
    struct equal_key
    {
        auto operator()(const value_t& a, const value_t& b)
        {
            return Equal{}(a.first, b.first);
        }
        auto operator()(const value_t& a, const K& b)
        {
            return Equal{}(a.first, b);
        }
    };

    // Full equality on stored pairs, used when comparing whole maps.
    struct equal_value
    {
        auto operator()(const value_t& a, const value_t& b)
        {
            return Equal{}(a.first, b.first) && a.second == b.second;
        }
    };

    using impl_t =
        detail::hamts::champ<value_t, hash_key, equal_key, MemoryPolicy, B>;

public:
    using key_type        = K;
    using mapped_type     = T;
    using value_type      = std::pair<K, T>;
    using size_type       = detail::hamts::size_t;
    using difference_type = std::ptrdiff_t;
    // Misspelled alias kept for backwards compatibility with code that
    // already depends on it.
    using diference_type  = std::ptrdiff_t;
    using hasher          = Hash;
    using key_equal       = Equal;
    using reference       = const value_type&;
    using const_reference = const value_type&;
    using iterator        = detail::hamts::
        champ_iterator<value_t, hash_key, equal_key, MemoryPolicy, B>;
    using const_iterator = iterator;
    using transient_type = map_transient<K, T, Hash, Equal, MemoryPolicy, B>;

    /*!
     * Default constructor. It creates a map of `size() == 0`. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    map() = default;

    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return {impl_}; }

    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const
    {
        return {impl_, typename iterator::end_t{}};
    }

    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type size() const { return impl_.size; }

    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }

    /*!
     * Returns `1` when the key `k` is contained in the map or `0`
     * otherwise. It won't allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type count(const K& k) const
    {
        return impl_.template get<detail::constantly<size_type, 1>,
                                  detail::constantly<size_type, 0>>(k);
    }

    /*!
     * Returns a `const` reference to the values associated to the key
     * `k`. If the key is not contained in the map, it returns a
     * default constructed value. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD const T& operator[](const K& k) const
    {
        return impl_.template get<project_value, default_value>(k);
    }

    /*!
     * Returns a `const` reference to the values associated to the key
     * `k`. If the key is not contained in the map, throws an
     * `std::out_of_range` error. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    const T& at(const K& k) const
    {
        return impl_.template get<project_value, error_value>(k);
    }

    /*!
     * Returns a pointer to the value associated with the key `k`. If
     * the key is not contained in the map, a `nullptr` is returned.
     * It does not allocate memory and its complexity is *effectively*
     * @f$ O(1) @f$.
     *
     * @rst
     *
     * .. admonition:: Why doesn't this function return an iterator?
     *
     *   Associative containers from the C++ standard library provide a
     *   ``find`` method that returns an iterator pointing to the
     *   element in the container or ``end()`` when the key is missing.
     *   In the case of an unordered container, the only meaningful
     *   thing one may do with it is to compare it with the end, to
     *   test if the find was successful, and dereference it. This
     *   comparison is cumbersome compared to testing for a non-empty
     *   optional value. Furthermore, for an immutable container,
     *   returning an iterator would have some additional performance
     *   cost, with no benefits otherwise.
     *
     *   In our opinion, this function should return a
     *   ``std::optional<const T&>`` but this construction is not valid
     *   in any current standard. As a compromise we return a
     *   pointer, which has similar syntactic properties yet it is
     *   unfortunately unnecessarily unrestricted.
     *
     * @endrst
     */
    IMMER_NODISCARD const T* find(const K& k) const
    {
        return impl_.template get<project_value_ptr,
                                  detail::constantly<const T*, nullptr>>(k);
    }

    /*!
     * Returns whether the maps are equal.
     */
    IMMER_NODISCARD bool operator==(const map& other) const
    {
        return impl_.template equals<equal_value>(other.impl_);
    }
    IMMER_NODISCARD bool operator!=(const map& other) const
    {
        return !(*this == other);
    }

    /*!
     * Returns a map containing the association `value`. If the key is
     * already in the map, it replaces its association in the map.
     * It may allocate memory and its complexity is *effectively* @f$
     * O(1) @f$.
     */
    IMMER_NODISCARD map insert(value_type value) const
    {
        return impl_.add(std::move(value));
    }

    /*!
     * Returns a map containing the association `(k, v)`. If the key
     * is already in the map, it replaces its association in the map.
     * It may allocate memory and its complexity is *effectively* @f$
     * O(1) @f$.
     */
    IMMER_NODISCARD map set(key_type k, mapped_type v) const
    {
        return impl_.add({std::move(k), std::move(v)});
    }

    /*!
     * Returns a map replacing the association `(k, v)` by the new
     * association `(k, fn(v))`, where `v` is the currently associated
     * value for `k` in the map or a default constructed value
     * otherwise. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    template <typename Fn>
    IMMER_NODISCARD map update(key_type k, Fn&& fn) const
    {
        return impl_
            .template update<project_value, default_value, combine_value>(
                std::move(k), std::forward<Fn>(fn));
    }

    /*!
     * Returns a map without the key `k`. If the key is not
     * associated in the map it returns the same map. It may allocate
     * memory and its complexity is *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD map erase(const K& k) const { return impl_.sub(k); }

    /*!
     * Returns a @a transient form of this container, a
     * `immer::map_transient`.
     */
    IMMER_NODISCARD transient_type transient() const&
    {
        return transient_type{impl_};
    }
    IMMER_NODISCARD transient_type transient() &&
    {
        return transient_type{std::move(impl_)};
    }

    // Semi-private
    const impl_t& impl() const { return impl_; }

private:
    friend transient_type;

    map(impl_t impl)
        : impl_(std::move(impl))
    {}

    impl_t impl_ = impl_t::empty();
};
} // namespace immer

View file

@ -0,0 +1,41 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/hamts/champ.hpp>
#include <immer/memory_policy.hpp>
#include <functional>
namespace immer {
/*!
 * @rst
 *
 * .. admonition:: Become a sponsor!
 *    :class: danger
 *
 *    This component is planned but it has **not been implemented yet**.
 *
 *    Transients can critically improve the performance of applications
 *    intensively using ``set`` and ``map``. If you are working for an
 *    organization using the library in a commercial project, please consider
 *    **sponsoring this work**: juanpe@sinusoid.al
 *
 * @endrst
 */
// Forward declaration only: `map::transient()` names this type, but no
// definition exists yet (see the admonition above).
template <typename K,
          typename T,
          typename Hash         = std::hash<K>,
          typename Equal        = std::equal_to<K>,
          typename MemoryPolicy = default_memory_policy,
          detail::hamts::bits_t B = default_bits>
class map_transient;
} // namespace immer

View file

@ -0,0 +1,135 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/cpp_heap.hpp>
#include <immer/heap/heap_policy.hpp>
#include <immer/refcount/no_refcount_policy.hpp>
#include <immer/refcount/refcount_policy.hpp>
#include <immer/refcount/unsafe_refcount_policy.hpp>
#include <immer/transience/gc_transience_policy.hpp>
#include <immer/transience/no_transience_policy.hpp>
#include <type_traits>
namespace immer {
/*!
 * Metafunction that returns the best *transience policy* to use for a
 * given *refcount policy*.
 */
// When reference counting is disabled we assume a tracing garbage
// collector is in use, which needs `gc_transience_policy` to track
// ownership tokens; otherwise no tracking is required at all.
template <typename RefcountPolicy>
struct get_transience_policy
    : std::conditional<std::is_same<RefcountPolicy, no_refcount_policy>::value,
                       gc_transience_policy,
                       no_transience_policy>
{};

// Convenience alias for `get_transience_policy<T>::type`.
template <typename T>
using get_transience_policy_t = typename get_transience_policy<T>::type;
/*!
 * Metafunction that returns whether to *prefer fewer bigger objects*
 * to use for a given *heap policy*.
 */
// True only for the plain standard heap -- presumably because pooled
// (free-list) heaps prefer uniform allocation sizes; confirm against
// the heap implementations.
template <typename HeapPolicy>
struct get_prefer_fewer_bigger_objects
    : std::integral_constant<
          bool,
          std::is_same<HeapPolicy, heap_policy<cpp_heap>>::value>
{};

// Convenience variable template for `get_prefer_fewer_bigger_objects`.
template <typename T>
constexpr auto get_prefer_fewer_bigger_objects_v =
    get_prefer_fewer_bigger_objects<T>::value;
/*!
 * Metafunction that returns whether to use *transient R-Values*
 * for a given *refcount policy*.
 */
// In-place mutation of r-values is only safe when reference counting
// can prove exclusive ownership, hence it is disabled for
// `no_refcount_policy`.
template <typename RefcountPolicy>
struct get_use_transient_rvalues
    : std::integral_constant<
          bool,
          !std::is_same<RefcountPolicy, no_refcount_policy>::value>
{};

// Convenience variable template for `get_use_transient_rvalues`.
template <typename T>
constexpr auto get_use_transient_rvalues_v =
    get_use_transient_rvalues<T>::value;
/*!
 * This is a default implementation of a *memory policy*. A memory
 * policy is just a bag of other policies plus some flags with hints
 * to the user about the best way to use these strategies.
 *
 * @tparam HeapPolicy A *heap policy*, for example, @ref heap_policy.
 * @tparam RefcountPolicy A *reference counting policy*, for example,
 *         @ref refcount_policy.
 * @tparam TransiencePolicy A *transience policy*, for example,
 *         @ref no_transience_policy.
 * @tparam PreferFewerBiggerObjects Boolean flag indicating whether
 *         the user should prefer to allocate memory in bigger chunks
 *         --e.g. by putting various objects in the same memory
 *         region-- or not.
 * @tparam UseTransientRValues Boolean flag indicating whether
 *         immutable containers should try to modify contents in-place
 *         when manipulating an r-value reference.
 */
template <typename HeapPolicy,
          typename RefcountPolicy,
          typename TransiencePolicy = get_transience_policy_t<RefcountPolicy>,
          bool PreferFewerBiggerObjects =
              get_prefer_fewer_bigger_objects_v<HeapPolicy>,
          bool UseTransientRValues =
              get_use_transient_rvalues_v<RefcountPolicy>>
struct memory_policy
{
    using heap       = HeapPolicy;
    using refcount   = RefcountPolicy;
    using transience = TransiencePolicy;

    static constexpr bool prefer_fewer_bigger_objects =
        PreferFewerBiggerObjects;

    static constexpr bool use_transient_rvalues = UseTransientRValues;

    // The transience policy instantiated for this heap; containers use
    // its nested `edit`/`owner`/`ownee` types to track in-place
    // mutability.
    using transience_t = typename transience::template apply<heap>::type;
};
/*!
 * The default *heap policy* just uses the standard heap with a
 * @ref free_list_heap_policy. If `IMMER_NO_FREE_LIST` is defined to `1`
 * then it just uses the standard heap.
 */
#if IMMER_NO_FREE_LIST
// Plain standard heap, wrapped in `debug_size_heap` -- presumably for
// allocation-size bookkeeping; see its definition to confirm.
using default_heap_policy = heap_policy<debug_size_heap<cpp_heap>>;
#else
#if IMMER_NO_THREAD_SAFETY
// Free list without thread-safety guarantees.
using default_heap_policy = unsafe_free_list_heap_policy<cpp_heap>;
#else
// Thread-safe free list over the standard heap (the default build).
using default_heap_policy = free_list_heap_policy<cpp_heap>;
#endif
#endif

/*!
 * By default we use thread safe reference counting.
 */
#if IMMER_NO_THREAD_SAFETY
using default_refcount_policy = unsafe_refcount_policy;
#else
using default_refcount_policy = refcount_policy;
#endif

/*!
 * The default memory policy.
 */
using default_memory_policy =
    memory_policy<default_heap_policy, default_refcount_policy>;
} // namespace immer

View file

@ -0,0 +1,37 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/refcount/no_refcount_policy.hpp>
namespace immer {
// CRTP mix-in providing the hooks required by `boost::intrusive_ptr`
// (or compatible smart pointers) on top of a pluggable refcount policy.
template <typename Deriv, typename RefcountPolicy>
class enable_intrusive_ptr
{
    mutable RefcountPolicy count_;

public:
    // New instances start in the "disowned" state: nobody holds a
    // reference until intrusive_ptr_add_ref is first called.
    enable_intrusive_ptr()
        : count_{disowned{}}
    {}

    // Smart-pointer hook: register one more owner.
    friend void intrusive_ptr_add_ref(const Deriv* p) { p->count_.inc(); }

    // Smart-pointer hook: drop one owner and destroy the object once
    // the policy reports that the last reference is gone.
    friend void intrusive_ptr_release(const Deriv* p)
    {
        if (p->count_.dec())
            delete p;
    }
};
} // namespace immer

View file

@ -0,0 +1,45 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
namespace immer {
// Tag type used to construct reference-count data in the "disowned"
// state, i.e. without registering an initial owner.
struct disowned
{};

// A lock that does nothing, for use where no thread-safety is needed:
// acquiring it always succeeds immediately.
struct no_spinlock
{
    bool try_lock() { return true; }
    void lock() {}
    void unlock() {}

    // RAII helper mirroring `spinlock::scoped_lock`; holds no state.
    struct scoped_lock
    {
        scoped_lock(no_spinlock&) {}
    };
};

/*!
 * Disables reference counting, to be used with an alternative garbage
 * collection strategy like a `gc_heap`.
 */
struct no_refcount_policy
{
    using spinlock_type = no_spinlock;

    no_refcount_policy() {} // note: stray `;` after the body removed
    no_refcount_policy(disowned) {}

    void inc() {}
    // Always `false`: the object is never "released" by the counter;
    // reclamation is left to the garbage collector.
    bool dec() { return false; }
    void dec_unsafe() {}
    // Always `false`: exclusivity can never be proven without a count,
    // so in-place mutation is never allowed.
    bool unique() { return false; }
};
} // namespace immer

View file

@ -0,0 +1,101 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/refcount/no_refcount_policy.hpp>
#include <atomic>
#include <cassert>
#include <thread>
#include <utility>
// This has been shamelessly copied from boost...
#if defined(_MSC_VER) && _MSC_VER >= 1310 && \
(defined(_M_IX86) || defined(_M_X64)) && !defined(__c2__)
extern "C" void _mm_pause();
#define IMMER_SMT_PAUSE _mm_pause()
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define IMMER_SMT_PAUSE __asm__ __volatile__("rep; nop" : : : "memory")
#endif
namespace immer {
// An atomic spinlock in the spirit of the ones used by boost to provide
// "atomic" shared_ptr operations; it also does not differ much from the
// equivalents found in libc++ or libstdc++.
struct spinlock
{
    std::atomic_flag v_{};

    bool try_lock() { return !v_.test_and_set(std::memory_order_acquire); }

    void lock()
    {
        // Busy-spin for a few rounds, then (on x86) issue pause
        // instructions, and finally fall back to yielding the thread.
        auto spins = 0u;
        while (!try_lock()) {
            if (spins >= 4u) {
#ifdef IMMER_SMT_PAUSE
                if (spins < 16u)
                    IMMER_SMT_PAUSE;
                else
#endif
                    std::this_thread::yield();
            }
            ++spins;
        }
    }

    void unlock() { v_.clear(std::memory_order_release); }

    // Non-copyable RAII guard that holds the lock for its lifetime.
    struct scoped_lock
    {
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        explicit scoped_lock(spinlock& sp)
            : sp_{sp}
        {
            sp.lock();
        }

        ~scoped_lock() { sp_.unlock(); }

    private:
        spinlock& sp_;
    };
};
/*!
 * A reference counting policy implemented using an *atomic* `int`
 * count. It is **thread-safe**.
 */
struct refcount_policy
{
    using spinlock_type = spinlock;

    mutable std::atomic<int> refcount;

    // A freshly created object has exactly one owner: its creator.
    refcount_policy()
        : refcount{1} {};
    // `disowned` objects start with a count of zero; ownership is
    // claimed later via `inc()`.
    refcount_policy(disowned)
        : refcount{0}
    {}

    // Relaxed ordering suffices here: acquiring a new reference
    // implies the caller already holds one, so no synchronization with
    // other memory operations is required.
    void inc() { refcount.fetch_add(1, std::memory_order_relaxed); }

    // Returns `true` when the last reference was just dropped.  The
    // acquire/release ordering ensures all prior writes to the object
    // happen-before its destruction in whichever thread observes
    // `true`.
    bool dec() { return 1 == refcount.fetch_sub(1, std::memory_order_acq_rel); }

    // Decrement that must never bring the count to zero (asserted),
    // so a relaxed decrement is enough -- no destruction can follow.
    void dec_unsafe()
    {
        assert(refcount.load() > 1);
        refcount.fetch_sub(1, std::memory_order_relaxed);
    }

    // True when the calling thread holds the only reference.
    bool unique() { return refcount == 1; }
};
} // namespace immer

View file

@ -0,0 +1,40 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/refcount/no_refcount_policy.hpp>
#include <atomic>
#include <utility>
namespace immer {
/*!
 * A reference counting policy implemented using a raw `int` count.
 * It is **not thread-safe**.
 */
struct unsafe_refcount_policy
{
    using spinlock_type = no_spinlock;

    mutable int refcount;

    // Freshly created objects are owned by their creator.
    unsafe_refcount_policy()
        : refcount{1}
    {}
    // Disowned objects start without any owner at all.
    unsafe_refcount_policy(disowned)
        : refcount{0}
    {}

    void inc() { refcount += 1; }
    // True when the last reference was just dropped.
    bool dec()
    {
        refcount -= 1;
        return refcount == 0;
    }
    void dec_unsafe() { refcount -= 1; }
    bool unique() { return refcount == 1; }
};
} // namespace immer

198
third_party/immer/immer/set.hpp vendored Normal file
View file

@ -0,0 +1,198 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/hamts/champ.hpp>
#include <immer/detail/hamts/champ_iterator.hpp>
#include <immer/memory_policy.hpp>
#include <functional>
namespace immer {
// Forward declaration of the transient counterpart of `set`, declared
// in <immer/set_transient.hpp> (not implemented yet, per that header).
template <typename T,
          typename Hash,
          typename Equal,
          typename MemoryPolicy,
          detail::hamts::bits_t B>
class set_transient;
/*!
* Immutable set representing an unordered bag of values.
*
* @tparam T The type of the values to be stored in the container.
* @tparam Hash The type of a function object capable of hashing
* values of type `T`.
* @tparam Equal The type of a function object capable of comparing
* values of type `T`.
* @tparam MemoryPolicy Memory management policy. See @ref
* memory_policy.
*
* @rst
*
* This container provides a good trade-off between cache locality,
* membership checks, update performance and structural sharing. It
* does so by storing the data in contiguous chunks of :math:`2^{B}`
* elements. When storing big objects, the size of these contiguous
* chunks can become too big, damaging performance. If this is
* measured to be problematic for a specific use-case, it can be
* solved by using a `immer::box` to wrap the type `T`.
*
* **Example**
* .. literalinclude:: ../example/set/intro.cpp
* :language: c++
* :start-after: intro/start
* :end-before: intro/end
*
* @endrst
*
*/
template <typename T,
          typename Hash           = std::hash<T>,
          typename Equal          = std::equal_to<T>,
          typename MemoryPolicy   = default_memory_policy,
          detail::hamts::bits_t B = default_bits>
class set
{
    using impl_t = detail::hamts::champ<T, Hash, Equal, MemoryPolicy, B>;

    // Returns the address of a stored value; used to implement `find()`.
    struct project_value_ptr
    {
        const T* operator()(const T& v) const noexcept { return &v; }
    };

public:
    using key_type        = T;
    using value_type      = T;
    using size_type       = detail::hamts::size_t;
    using difference_type = std::ptrdiff_t;
    // Misspelled alias kept for backwards compatibility with code that
    // already depends on it.
    using diference_type  = std::ptrdiff_t;
    using hasher          = Hash;
    using key_equal       = Equal;
    using reference       = const T&;
    using const_reference = const T&;
    using iterator =
        detail::hamts::champ_iterator<T, Hash, Equal, MemoryPolicy, B>;
    using const_iterator = iterator;
    using transient_type = set_transient<T, Hash, Equal, MemoryPolicy, B>;

    /*!
     * Default constructor. It creates a set of `size() == 0`. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    set() = default;

    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return {impl_}; }

    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const
    {
        return {impl_, typename iterator::end_t{}};
    }

    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type size() const { return impl_.size; }

    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }

    /*!
     * Returns `1` when `value` is contained in the set or `0`
     * otherwise. It won't allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type count(const T& value) const
    {
        return impl_.template get<detail::constantly<size_type, 1>,
                                  detail::constantly<size_type, 0>>(value);
    }

    /*!
     * Returns a pointer to the value if `value` is contained in the
     * set, or nullptr otherwise.
     * It does not allocate memory and its complexity is *effectively*
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD const T* find(const T& value) const
    {
        return impl_.template get<project_value_ptr,
                                  detail::constantly<const T*, nullptr>>(value);
    }

    /*!
     * Returns whether the sets are equal.
     */
    IMMER_NODISCARD bool operator==(const set& other) const
    {
        return impl_.equals(other.impl_);
    }
    IMMER_NODISCARD bool operator!=(const set& other) const
    {
        return !(*this == other);
    }

    /*!
     * Returns a set containing `value`. If the `value` is already in
     * the set, it returns the same set. It may allocate memory and
     * its complexity is *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD set insert(T value) const
    {
        return impl_.add(std::move(value));
    }

    /*!
     * Returns a set without `value`. If the `value` is not in the
     * set it returns the same set. It may allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    IMMER_NODISCARD set erase(const T& value) const { return impl_.sub(value); }

    /*!
     * Returns a @a transient form of this container, a
     * `immer::set_transient`.
     */
    IMMER_NODISCARD transient_type transient() const&
    {
        return transient_type{impl_};
    }
    IMMER_NODISCARD transient_type transient() &&
    {
        return transient_type{std::move(impl_)};
    }

    // Semi-private
    const impl_t& impl() const { return impl_; }

private:
    friend transient_type;

    set(impl_t impl)
        : impl_(std::move(impl))
    {}

    impl_t impl_ = impl_t::empty();
};
} // namespace immer

View file

@ -0,0 +1,40 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/hamts/champ.hpp>
#include <immer/memory_policy.hpp>
#include <functional>
namespace immer {
/*!
* @rst
*
* .. admonition:: Become a sponsor!
* :class: danger
*
* This component is planned but it has **not been implemented yet**.
*
* Transiens can critically improve the performance of applications
* intensively using ``set`` and ``map``. If you are working for an
* organization using the library in a commercial project, please consider
* **sponsoring this work**: juanpe@sinusoid.al
*
* @endrst
*/
template <typename T,
typename Hash = std::hash<T>,
typename Equal = std::equal_to<T>,
typename MemoryPolicy = default_memory_policy,
detail::hamts::bits_t B = default_bits>
class set_transient;
} // namespace immer

View file

@ -0,0 +1,110 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/heap/tags.hpp>
#include <atomic>
#include <memory>
#include <utility>
namespace immer {
/*!
* Provides transience ownership tracking when a *tracing garbage
* collector* is used instead of reference counting.
*
* @rst
*
* .. warning:: Using this policy without an allocation scheme that
* includes automatic tracing garbage collection may cause memory
* leaks.
*
* @endrst
*/
struct gc_transience_policy
{
    template <typename HeapPolicy>
    struct apply
    {
        struct type
        {
            using heap_ = typename HeapPolicy::type;

            // Opaque token identifying the owner of a transient; two
            // edits compare equal iff they carry the same token pointer.
            struct edit
            {
                void* v;
                edit() = delete;
                bool operator==(edit x) const { return v == x.v; }
                bool operator!=(edit x) const { return v != x.v; }
            };

            // Holds a unique ownership token, regenerated whenever the
            // owner may have become shared.
            struct owner
            {
                // Tokens are allocated from the GC heap -- presumably so
                // they stay alive while any node still references them;
                // confirm against the heap implementation.
                void* make_token_()
                {
                    return heap_::allocate(1, norefs_tag{});
                };

                mutable std::atomic<void*> token_;

                operator edit() { return {token_}; }

                owner()
                    : token_{make_token_()}
                {}
                // Copying hands BOTH sides fresh tokens: nodes tagged
                // with the old token may now be reachable from two
                // owners, so neither may keep mutating them in place.
                owner(const owner& o)
                    : token_{make_token_()}
                {
                    o.token_ = make_token_();
                }
                // Moving transfers the token; the source is left with
                // its (now shared) value but is expected to be dead.
                owner(owner&& o) noexcept
                    : token_{o.token_.load()}
                {}
                owner& operator=(const owner& o)
                {
                    o.token_ = make_token_();
                    token_ = make_token_();
                    return *this;
                }
                owner& operator=(owner&& o) noexcept
                {
                    token_ = o.token_.load();
                    return *this;
                }
            };

            // Attached to nodes; remembers which owner (if any) is
            // allowed to mutate the node in place.
            struct ownee
            {
                edit token_{nullptr};

                ownee& operator=(edit e)
                {
                    assert(e != noone);
                    // This would be a nice safety plug but it sadly
                    // does not hold during transient concatenation.
                    // assert(token_ == e || token_ == edit{nullptr});
                    token_ = e;
                    return *this;
                }

                bool can_mutate(edit t) const { return token_ == t; }
                bool owned() const { return token_ != edit{nullptr}; }
            };

            // Sentinel owner used for "nobody owns this" checks.
            static owner noone;
        };
    };
};

template <typename HP>
typename gc_transience_policy::apply<HP>::type::owner
    gc_transience_policy::apply<HP>::type::noone = {};
} // namespace immer

View file

@ -0,0 +1,48 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
namespace immer {
/*!
* Disables any special *transience* tracking. To be used when
* *reference counting* is available instead.
*/
/*!
 * Disables any special *transience* tracking. To be used when
 * *reference counting* is available instead.
 */
struct no_transience_policy
{
    template <typename>
    struct apply
    {
        struct type
        {
            // Edits carry no information: without transience tracking,
            // in-place mutation is never permitted.
            struct edit
            {};

            // An owner that produces (empty) edit tokens.
            struct owner
            {
                operator edit() const { return {}; }
            };

            // Nodes never remember an owner: assignment is a no-op and
            // both queries are constantly `false`.
            struct ownee
            {
                ownee& operator=(edit) { return *this; } // stray `;` removed
                bool can_mutate(edit) const { return false; }
                bool owned() const { return false; }
            };

            // Sentinel owner shared by all users of this policy.
            static owner noone;
        };
    };
};

template <typename HP>
typename no_transience_policy::apply<HP>::type::owner
    no_transience_policy::apply<HP>::type::noone = {};
} // namespace immer

412
third_party/immer/immer/vector.hpp vendored Normal file
View file

@ -0,0 +1,412 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/rbts/rbtree.hpp>
#include <immer/detail/rbts/rbtree_iterator.hpp>
#include <immer/memory_policy.hpp>
#if IMMER_DEBUG_PRINT
#include <immer/flex_vector.hpp>
#endif
namespace immer {
// Forward declarations: both types are declared friends inside `vector`
// below and share its internal tree representation.
template <typename T,
          typename MemoryPolicy,
          detail::rbts::bits_t B,
          detail::rbts::bits_t BL>
class flex_vector;

template <typename T,
          typename MemoryPolicy,
          detail::rbts::bits_t B,
          detail::rbts::bits_t BL>
class vector_transient;
/*!
* Immutable sequential container supporting both random access and
* structural sharing.
*
* @tparam T The type of the values to be stored in the container.
* @tparam MemoryPolicy Memory management policy. See @ref
* memory_policy.
*
* @rst
*
 * This container provides a good trade-off between cache locality,
* random access, update performance and structural sharing. It does
* so by storing the data in contiguous chunks of :math:`2^{BL}`
* elements. By default, when ``sizeof(T) == sizeof(void*)`` then
* :math:`B=BL=5`, such that data would be stored in contiguous
* chunks of :math:`32` elements.
*
* You may learn more about the meaning and implications of ``B`` and
* ``BL`` parameters in the :doc:`implementation` section.
*
* .. note:: In several methods we say that their complexity is
* *effectively* :math:`O(...)`. Do not confuse this with the word
* *amortized*, which has a very different meaning. In this
* context, *effective* means that while the
 *   mathematically rigorous
* complexity might be higher, for all practical matters the
* provided complexity is more useful to think about the actual
* cost of the operation.
*
* **Example**
* .. literalinclude:: ../example/vector/intro.cpp
* :language: c++
* :start-after: intro/start
* :end-before: intro/end
*
* @endrst
*/
template <typename T,
typename MemoryPolicy = default_memory_policy,
detail::rbts::bits_t B = default_bits,
detail::rbts::bits_t BL =
detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
class vector
{
using impl_t = detail::rbts::rbtree<T, MemoryPolicy, B, BL>;
using flex_t = flex_vector<T, MemoryPolicy, B, BL>;
using move_t =
std::integral_constant<bool, MemoryPolicy::use_transient_rvalues>;
public:
static constexpr auto bits = B;
static constexpr auto bits_leaf = BL;
using memory_policy = MemoryPolicy;
using value_type = T;
using reference = const T&;
using size_type = detail::rbts::size_t;
using difference_type = std::ptrdiff_t;
using const_reference = const T&;
using iterator = detail::rbts::rbtree_iterator<T, MemoryPolicy, B, BL>;
using const_iterator = iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using transient_type = vector_transient<T, MemoryPolicy, B, BL>;
/*!
* Default constructor. It creates a vector of `size() == 0`. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
vector() = default;
/*!
* Constructs a vector containing the elements in `values`.
*/
vector(std::initializer_list<T> values)
: impl_{impl_t::from_initializer_list(values)}
{}
/*!
* Constructs a vector containing the elements in the range
* defined by the input iterator `first` and range sentinel `last`.
*/
template <typename Iter,
typename Sent,
std::enable_if_t<detail::compatible_sentinel_v<Iter, Sent>,
bool> = true>
vector(Iter first, Sent last)
: impl_{impl_t::from_range(first, last)}
{}
/*!
* Constructs a vector containing the element `val` repeated `n`
* times.
*/
vector(size_type n, T v = {})
: impl_{impl_t::from_fill(n, v)}
{}
/*!
* Returns an iterator pointing at the first element of the
* collection. It does not allocate memory and its complexity is
* @f$ O(1) @f$.
*/
IMMER_NODISCARD iterator begin() const { return {impl_}; }
/*!
* Returns an iterator pointing just after the last element of the
* collection. It does not allocate and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD iterator end() const
{
return {impl_, typename iterator::end_t{}};
}
/*!
* Returns an iterator that traverses the collection backwards,
* pointing at the first element of the reversed collection. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD reverse_iterator rbegin() const
{
return reverse_iterator{end()};
}
/*!
* Returns an iterator that traverses the collection backwards,
* pointing after the last element of the reversed collection. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD reverse_iterator rend() const
{
return reverse_iterator{begin()};
}
/*!
* Returns the number of elements in the container. It does
* not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD size_type size() const { return impl_.size; }
/*!
* Returns `true` if there are no elements in the container. It
* does not allocate memory and its complexity is @f$ O(1) @f$.
*/
IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
/*!
* Access the last element.
*/
IMMER_NODISCARD const T& back() const { return impl_.back(); }
/*!
* Access the first element.
*/
IMMER_NODISCARD const T& front() const { return impl_.front(); }
/*!
* Returns a `const` reference to the element at position `index`.
* It is undefined when @f$ 0 index \geq size() @f$. It does not
* allocate memory and its complexity is *effectively* @f$ O(1)
* @f$.
*/
IMMER_NODISCARD reference operator[](size_type index) const
{
return impl_.get(index);
}
/*!
* Returns a `const` reference to the element at position
* `index`. It throws an `std::out_of_range` exception when @f$
* index \geq size() @f$. It does not allocate memory and its
* complexity is *effectively* @f$ O(1) @f$.
*/
reference at(size_type index) const { return impl_.get_check(index); }
/*!
* Returns whether the vectors are equal.
*/
IMMER_NODISCARD bool operator==(const vector& other) const
{
return impl_.equals(other.impl_);
}
IMMER_NODISCARD bool operator!=(const vector& other) const
{
return !(*this == other);
}
/*!
* Returns a vector with `value` inserted at the end. It may
* allocate memory and its complexity is *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/vector/vector.cpp
* :language: c++
* :dedent: 8
* :start-after: push-back/start
* :end-before: push-back/end
*
* @endrst
*/
IMMER_NODISCARD vector push_back(value_type value) const&
{
return impl_.push_back(std::move(value));
}
IMMER_NODISCARD decltype(auto) push_back(value_type value) &&
{
return push_back_move(move_t{}, std::move(value));
}
/*!
* Returns a vector containing value `value` at position `idx`.
* Undefined for `index >= size()`.
* It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/vector/vector.cpp
* :language: c++
* :dedent: 8
* :start-after: set/start
* :end-before: set/end
*
* @endrst
*/
IMMER_NODISCARD vector set(size_type index, value_type value) const&
{
return impl_.assoc(index, std::move(value));
}
IMMER_NODISCARD decltype(auto) set(size_type index, value_type value) &&
{
return set_move(move_t{}, index, std::move(value));
}
/*!
* Returns a vector containing the result of the expression
* `fn((*this)[idx])` at position `idx`.
* Undefined for `0 >= size()`.
* It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/vector/vector.cpp
* :language: c++
* :dedent: 8
* :start-after: update/start
* :end-before: update/end
*
* @endrst
*/
template <typename FnT>
IMMER_NODISCARD vector update(size_type index, FnT&& fn) const&
{
return impl_.update(index, std::forward<FnT>(fn));
}
template <typename FnT>
IMMER_NODISCARD decltype(auto) update(size_type index, FnT&& fn) &&
{
return update_move(move_t{}, index, std::forward<FnT>(fn));
}
/*!
* Returns a vector containing only the first `min(elems, size())`
* elements. It may allocate memory and its complexity is
* *effectively* @f$ O(1) @f$.
*
* @rst
*
* **Example**
* .. literalinclude:: ../example/vector/vector.cpp
* :language: c++
* :dedent: 8
* :start-after: take/start
* :end-before: take/end
*
* @endrst
*/
IMMER_NODISCARD vector take(size_type elems) const&
{
return impl_.take(elems);
}
IMMER_NODISCARD decltype(auto) take(size_type elems) &&
{
return take_move(move_t{}, elems);
}
    /*!
     * Returns a @a transient form of this container, an
     * `immer::vector_transient`.
     */
    IMMER_NODISCARD transient_type transient() const&
    {
        // Lvalue overload: the transient starts from a copy of the
        // implementation tree.
        return transient_type{impl_};
    }
    IMMER_NODISCARD transient_type transient() &&
    {
        // Rvalue overload: steal the tree from the expiring vector.
        return transient_type{std::move(impl_)};
    }
    // Semi-private: read-only access to the underlying implementation
    // tree, exposed for interop with other parts of the library.
    const impl_t& impl() const { return impl_; }
#if IMMER_DEBUG_PRINT
    // Debugging aid: prints the structure via the flex_vector wrapper.
    // Only compiled when IMMER_DEBUG_PRINT is enabled.
    void debug_print(std::ostream& out = std::cerr) const
    {
        flex_t{*this}.debug_print(out);
    }
#endif
private:
    friend flex_t;
    friend transient_type;
    // Wraps an already-built implementation tree. Private: reachable
    // only through the friends above and the *_move helpers below.
    vector(impl_t impl)
        : impl_(std::move(impl))
    {
#if IMMER_DEBUG_PRINT
        // force the compiler to generate debug_print, so we can call
        // it from a debugger
        [](volatile auto) {}(&vector::debug_print);
#endif
    }
    // Tag-dispatched implementations backing the rvalue-reference (&&)
    // overloads above. The `std::true_type` versions mutate the tree
    // in place and return the expiring object by rvalue reference; the
    // `std::false_type` versions fall back to the persistent, copying
    // operation. The tag is the `move_t` passed by the public
    // overloads (its definition lies earlier in the class, outside
    // this excerpt — presumably derived from the memory policy).
    vector&& push_back_move(std::true_type, value_type value)
    {
        impl_.push_back_mut({}, std::move(value));
        return std::move(*this);
    }
    vector push_back_move(std::false_type, value_type value)
    {
        return impl_.push_back(std::move(value));
    }
    vector&& set_move(std::true_type, size_type index, value_type value)
    {
        impl_.assoc_mut({}, index, std::move(value));
        return std::move(*this);
    }
    vector set_move(std::false_type, size_type index, value_type value)
    {
        return impl_.assoc(index, std::move(value));
    }
    template <typename Fn>
    vector&& update_move(std::true_type, size_type index, Fn&& fn)
    {
        impl_.update_mut({}, index, std::forward<Fn>(fn));
        return std::move(*this);
    }
    template <typename Fn>
    vector update_move(std::false_type, size_type index, Fn&& fn)
    {
        return impl_.update(index, std::forward<Fn>(fn));
    }
    vector&& take_move(std::true_type, size_type elems)
    {
        impl_.take_mut({}, elems);
        return std::move(*this);
    }
    vector take_move(std::false_type, size_type elems)
    {
        return impl_.take(elems);
    }
    // Implementation tree; default-initialized to the empty tree.
    impl_t impl_ = impl_t::empty();
};
} // namespace immer

View file

@ -0,0 +1,203 @@
//
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#pragma once
#include <immer/detail/rbts/rbtree.hpp>
#include <immer/detail/rbts/rbtree_iterator.hpp>
#include <immer/memory_policy.hpp>
namespace immer {
template <typename T,
typename MemoryPolicy,
detail::rbts::bits_t B,
detail::rbts::bits_t BL>
class vector;
template <typename T,
typename MemoryPolicy,
detail::rbts::bits_t B,
detail::rbts::bits_t BL>
class flex_vector_transient;
/*!
 * Mutable version of `immer::vector`.
 *
 * @rst
 *
 * Refer to :doc:`transients` to learn more about when and how to use
 * the mutable versions of immutable containers.
 *
 * @endrst
 */
template <typename T,
          typename MemoryPolicy = default_memory_policy,
          detail::rbts::bits_t B = default_bits,
          detail::rbts::bits_t BL =
              detail::rbts::derive_bits_leaf<T, MemoryPolicy, B>>
// Inherits the transience owner token: `*this` is passed as the owner
// to the mutating tree operations below.
class vector_transient : MemoryPolicy::transience_t::owner
{
    using impl_t = detail::rbts::rbtree<T, MemoryPolicy, B, BL>;
    using flex_t = flex_vector_transient<T, MemoryPolicy, B, BL>;
    using owner_t = typename MemoryPolicy::transience_t::owner;
public:
    static constexpr auto bits = B;
    static constexpr auto bits_leaf = BL;
    using memory_policy = MemoryPolicy;
    using value_type = T;
    using reference = const T&;
    using size_type = detail::rbts::size_t;
    using difference_type = std::ptrdiff_t;
    using const_reference = const T&;
    using iterator = detail::rbts::rbtree_iterator<T, MemoryPolicy, B, BL>;
    using const_iterator = iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using persistent_type = vector<T, MemoryPolicy, B, BL>;
    /*!
     * Default constructor. It creates a mutable vector of `size() ==
     * 0`. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    vector_transient() = default;
    /*!
     * Returns an iterator pointing at the first element of the
     * collection. It does not allocate memory and its complexity is
     * @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator begin() const { return {impl_}; }
    /*!
     * Returns an iterator pointing just after the last element of the
     * collection. It does not allocate and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD iterator end() const
    {
        return {impl_, typename iterator::end_t{}};
    }
    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing at the first element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rbegin() const
    {
        return reverse_iterator{end()};
    }
    /*!
     * Returns an iterator that traverses the collection backwards,
     * pointing after the last element of the reversed collection. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD reverse_iterator rend() const
    {
        return reverse_iterator{begin()};
    }
    /*!
     * Returns the number of elements in the container. It does
     * not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD size_type size() const { return impl_.size; }
    /*!
     * Returns `true` if there are no elements in the container. It
     * does not allocate memory and its complexity is @f$ O(1) @f$.
     */
    IMMER_NODISCARD bool empty() const { return impl_.size == 0; }
    /*!
     * Returns a `const` reference to the element at position `index`.
     * It is undefined when @f$ index \geq size() @f$. It does not
     * allocate memory and its complexity is *effectively* @f$ O(1)
     * @f$.
     */
    reference operator[](size_type index) const { return impl_.get(index); }
    /*!
     * Returns a `const` reference to the element at position
     * `index`. It throws an `std::out_of_range` exception when @f$
     * index \geq size() @f$. It does not allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    reference at(size_type index) const { return impl_.get_check(index); }
    /*!
     * Inserts `value` at the end. It may allocate memory and its
     * complexity is *effectively* @f$ O(1) @f$.
     */
    void push_back(value_type value)
    {
        // `*this` is the transience owner, enabling in-place mutation
        // of nodes owned by this transient.
        impl_.push_back_mut(*this, std::move(value));
    }
    /*!
     * Sets to the value `value` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void set(size_type index, value_type value)
    {
        impl_.assoc_mut(*this, index, std::move(value));
    }
    /*!
     * Updates the vector to contain the result of the expression
     * `fn((*this)[idx])` at position `idx`.
     * Undefined for `index >= size()`.
     * It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    template <typename FnT>
    void update(size_type index, FnT&& fn)
    {
        impl_.update_mut(*this, index, std::forward<FnT>(fn));
    }
    /*!
     * Resizes the vector to only contain the first `min(elems, size())`
     * elements. It may allocate memory and its complexity is
     * *effectively* @f$ O(1) @f$.
     */
    void take(size_type elems) { impl_.take_mut(*this, elems); }
    /*!
     * Returns an @a immutable form of this container, an
     * `immer::vector`.
     */
    IMMER_NODISCARD persistent_type persistent() &
    {
        // Replace the owner token with a fresh one before sharing the
        // tree — presumably so nodes handed to the persistent copy
        // are no longer mutated through this transient; verify against
        // the transience policy.
        this->owner_t::operator=(owner_t{});
        return persistent_type{impl_};
    }
    IMMER_NODISCARD persistent_type persistent() &&
    {
        // Expiring transient: move the tree out, no owner reset needed.
        return persistent_type{std::move(impl_)};
    }
private:
    friend flex_t;
    friend persistent_type;
    // Wraps an existing implementation tree; used by the friends when
    // converting from the persistent form.
    vector_transient(impl_t impl)
        : impl_(std::move(impl))
    {}
    // Implementation tree; default-initialized to the empty tree.
    impl_t impl_ = impl_t::empty();
};
} // namespace immer