// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
//
// find() also supports passing the hash explicitly:
//
//   iterator find(const key_type& key, size_t hash);
//   template <class U>
//   iterator find(const U& key, size_t hash);
//
// In addition the pointer to element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// # Table Layout
//
// A raw_hash_set's backing array consists of control bytes followed by slots
// that may or may not contain objects.
//
// The layout of the backing array, for `capacity` slots, is thus, as a
// pseudo-struct:
//
//   struct BackingArray {
//     // Sampling handler. This field isn't present when the sampling is
//     // disabled or this allocation hasn't been selected for sampling.
//     HashtablezInfoHandle infoz_;
//     // The number of elements we can insert before growing the capacity.
//     size_t growth_left;
//     // Control bytes for the "real" slots.
//     ctrl_t ctrl[capacity];
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
//     // stop and serves no other purpose.
//     ctrl_t sentinel;
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
//     // that if a probe sequence picks a value near the end of `ctrl`,
//     // `Group` will have valid control bytes to look at.
//     ctrl_t clones[kWidth - 1];
//     // The actual slot data.
//     slot_type slots[capacity];
//   };
//
// The length of this array is computed by `AllocSize()` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
// the slot array. Group manipulation is tightly optimized to be as efficient
// as possible: SSE and friends on x86, clever bit operations on other arches.
//
//      Group 1         Group 2        Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// Each control byte is either a special value for empty slots, deleted slots
// (sometimes called *tombstones*), and a special end-of-table marker used by
// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
// corresponding slot.
//
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
// objects that cannot possibly be the one we are looking for.
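//
// As a small illustration (hypothetical numbers, not taken from a real run):
// if `hash(x)` is 0x00ff00ff12345678, then `H2(hash(x))` is the low 7 bits,
// 0x78, which is what gets stored in the control byte of the slot holding
// `x`, while `H1(hash(x))` is derived from the remaining high bits (mixed
// with a per-table salt) and picks the group where probing starts.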
//
// # Table operations.
//
// The key operations are `insert`, `find`, and `erase`.
//
// Since `insert` and `erase` are implemented in terms of `find`, we describe
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
// group of slots in some interesting order.
//
// We now walk through these indices. At each index, we select the entire group
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
//
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
// likely to have actually found the object. That is, the chance is low that
// `==` is called and returns `false`. Thus, when we search for an object, we
// are unlikely to call `==` many times. This likelihood can be analyzed as
// follows (assuming that H2 is a random enough hash function).
//
// Let's assume that there are `k` "wrong" objects that must be examined in a
// probe sequence. For example, when doing a `find` on an object that is in the
// table, `k` is the number of objects between the start of the probe sequence
// and the final found object (not including the final found object). The
// expected number of objects with an H2 match is then `k/128`. Measurements
// and analysis indicate that even at high load factors, `k` is less than 32,
// meaning that the number of "false positive" comparisons we must perform is
// less than 1/8 per `find`.
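//
// A quick worked instance of that estimate (illustrative numbers only): if a
// `find` walks past k = 16 occupied, non-matching slots, each of them matches
// the 7-bit H2 tag with probability 1/128, so the expected number of spurious
// full `==` comparisons is 16/128 = 1/8.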

// `insert` is implemented in terms of `unchecked_insert`, which inserts a
// value presumed to not be in the table (violating this requirement will cause
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
// it, we construct a `probe_seq` once again, and use it to find the first
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
// first such slot in the group and mark it as full with `x`'s H2.
//
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
// perform a `find` to see if it's already present; if it is, we're done. If
// it's not, we may decide the table is getting overcrowded (i.e. the load
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
// each element of the table into the new array (we know that no insertion here
// will insert an already-present value), and discard the old backing array. At
// this point, we may `unchecked_insert` the value `x`.
//
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
// presents a viable, initialized slot pointee to the caller.
//
// `erase` is implemented in terms of `erase_at`, which takes an index to a
// slot. Given an offset, we simply create a tombstone and destroy its contents.
// If we can prove that the slot would not appear in a probe sequence, we can
// mark the slot as empty instead. We can prove this by observing that if a
// group has any empty slots, it has never been full (assuming we never create
// an empty slot in a group with no empties, which this heuristic guarantees we
// never do) and find would stop at this group anyways (since it does not probe
// beyond groups with empties).
//
// `erase` is `erase_at` composed with `find`: if we
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
// slot.
//
// To iterate, we simply traverse the array, skipping empty and deleted slots
// and stopping when we hit a `kSentinel`.

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/options.h"
#include "absl/base/port.h"
#include "absl/base/prefetch.h"
#include "absl/container/internal/common.h"  // IWYU pragma: export  // for node_handle
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
    defined(ABSL_HAVE_MEMORY_SANITIZER)
// When compiled in sanitizer mode, we add generation integers to the backing
// array and iterators. In the backing array, we store the generation between
// the control bytes and the slots. When iterators are dereferenced, we assert
// that the container has not been mutated in a way that could cause iterator
// invalidation since the iterator was initialized.
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
#endif

// We use uint8_t so we don't need to worry about padding.
using GenerationType = uint8_t;

// A sentinel value for empty generations. Using 0 makes it easy to constexpr
// initialize an array of this value.
constexpr GenerationType SentinelEmptyGeneration() { return 0; }

constexpr GenerationType NextGeneration(GenerationType generation) {
  return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
}

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
constexpr bool SwisstableGenerationsEnabled() { return true; }
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
#else
constexpr bool SwisstableGenerationsEnabled() { return false; }
constexpr size_t NumGenerationBytes() { return 0; }
#endif

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;
  swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::false_type /* propagate_on_container_swap */) {
  (void)lhs;
  (void)rhs;
  assert(lhs == rhs &&
         "It's UB to call swap with unequal non-propagating allocators.");
}

template <typename AllocType>
void CopyAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_alloc */) {
  lhs = rhs;
}
template <typename AllocType>
void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}

// The state for a probe sequence.
//
// Currently, the sequence is a triangular progression of the form
//
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
//
// The use of `Width` ensures that each probe step does not overlap groups;
// the sequence effectively outputs the addresses of *groups* (although not
// necessarily aligned to any boundary). The `Group` machinery allows us
// to check an entire group with minimal branching.
//
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
// As described above, the first few entries of the control byte array
// are mirrored at the end of the array, which `Group` will find and use
// for selecting candidates. However, when those candidates' slots are
// actually inspected, there are no corresponding slots for the cloned bytes,
// so we need to make sure we've treated those offsets as "wrapping around".
//
// It turns out that this probe sequence visits every group exactly once if the
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
 public:
  // Creates a new probe sequence using `hash` as the initial value of the
  // sequence and `mask` (usually the capacity of the table) as the mask to
  // apply to each value in the progression.
  probe_seq(size_t hash, size_t mask) {
    assert(((mask + 1) & mask) == 0 && "not a mask");
    mask_ = mask;
    offset_ = hash & mask_;
  }

  // The offset within the table, i.e., the value `p(i)` above.
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }

  void next() {
    index_ += Width;
    offset_ += index_;
    offset_ &= mask_;
  }
  // 0-based probe index, a multiple of `Width`.
  size_t index() const { return index_; }

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};
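
// A concrete sketch of the triangular progression above (assuming a 16-wide
// group and a table of capacity 63, i.e. four groups; values computed from
// the formula, not taken from a real run):
//
//   probe_seq<16> seq(/*hash=*/0, /*mask=*/63);
//   seq.offset();  // 0
//   seq.next();    // offset() == 16
//   seq.next();    // offset() == 48
//   seq.next();    // offset() == 32  (every group visited exactly once)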

template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
  template <class PassedKey, class... Args>
  std::pair<
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
                                         std::declval<const PassedKey&>()))>*
  operator()(const PassedKey&, const Args&...) const;
};

template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(Policy::apply(
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
        std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};

// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  using std::swap;
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  return false;
}

template <typename T>
uint32_t TrailingZeros(T x) {
  ABSL_ASSUME(x != 0);
  return static_cast<uint32_t>(countr_zero(x));
}

// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
// width of an abstract bit in the representation.
// This mask provides operations for any number of real bits set in an abstract
// bit. To add iteration on top of that, the implementation must guarantee that
// no more than the most significant real bit is set in a set abstract bit.
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {
 public:
  explicit NonIterableBitMask(T mask) : mask_(mask) {}

  explicit operator bool() const { return this->mask_ != 0; }

  // Returns the index of the lowest *abstract* bit set in `self`.
  uint32_t LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the index of the highest *abstract* bit set in `self`.
  uint32_t HighestBitSet() const {
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  }

  // Returns the number of trailing zero *abstract* bits.
  uint32_t TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the number of leading zero *abstract* bits.
  uint32_t LeadingZeros() const {
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return static_cast<uint32_t>(
               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
           Shift;
  }

  T mask_;
};

// A bitmask on which iteration is also supported.
//
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");

 public:
  explicit BitMask(T mask) : Base(mask) {}
  // BitMask is an iterator over the indices of its abstract bits.
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  BitMask& operator++() {
    if (Shift == 3) {
      constexpr uint64_t msbs = 0x8080808080808080ULL;
      this->mask_ &= msbs;
    }
    this->mask_ &= (this->mask_ - 1);
    return *this;
  }

  uint32_t operator*() const { return Base::LowestBitSet(); }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }
};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.

// A `ctrl_t` is a single control byte, which can have one of four
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
// and the sentinel. They have the following bit patterns:
//
//      empty: 1 0 0 0 0 0 0 0
//    deleted: 1 1 1 1 1 1 1 0
//       full: 0 h h h h h h h  // h represents the hash bits.
//   sentinel: 1 1 1 1 1 1 1 1
//
// These values are specifically tuned for SSE-flavored SIMD.
// The static_asserts below detail the source of these choices.
//
// We use an enum class so that when strict aliasing is enabled, the compiler
// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    (static_cast<int8_t>(ctrl_t::kEmpty) &
     static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
static_assert(
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
    "registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
    "shared by ctrl_t::kSentinel to make the scalar test for "
    "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
              "ctrl_t::kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");

// See definition comment for why this is size 32.
ABSL_DLL extern const ctrl_t kEmptyGroup[32];

// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
  // Const must be cast away here; no uses of this function will actually write
  // to it, because it is only used for empty tables.
  return const_cast<ctrl_t*>(kEmptyGroup + 16);
}

// Returns a pointer to a generation to use for an empty hashtable.
GenerationType* EmptyGeneration();

// Returns whether `generation` is a generation for an empty hashtable that
// could be returned by EmptyGeneration().
inline bool IsEmptyGeneration(const GenerationType* generation) {
  return *generation == SentinelEmptyGeneration();
}

// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);

// Returns a per-table hash salt, which changes on resize. This gets mixed into
// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t PerTableSalt(const ctrl_t* ctrl) {
  // The low bits of the pointer have little or no entropy because of
  // alignment. We shift the pointer to try to use higher entropy bits. A
  // good number seems to be 12 bits, because that aligns with page size.
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  return (hash >> 7) ^ PerTableSalt(ctrl);
}

// Extracts the H2 portion of a hash: the 7 bits not used for H1.
//
// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }

// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }

#ifdef ABSL_INTERNAL_HAVE_SSE2
// Quick reference guide for intrinsics used below:
//
// * __m128i: An XMM (128-bit) word.
//
// * _mm_setzero_si128: Returns a zero vector.
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
//
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
// * _mm_and_si128:    Ands two i128s together.
// * _mm_or_si128:     Ors two i128s together.
// * _mm_andnot_si128: And-nots two i128s together.
//
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
//                   filling each lane with 0x00 or 0xff.
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
//
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
// * _mm_storeu_si128: Performs an unaligned store of an i128.
//
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
//                      argument if the corresponding lane of the second
//                      argument is positive, negative, or zero, respectively.
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
//                      bitmask consisting of those bits.
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
//                      four bits of each i8 lane in the second argument as
//                      indices.

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
// Work around this by using the portable implementation of Group
// when using -funsigned-char under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}

struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group

  explicit GroupSse2Impl(const ctrl_t* pos) {
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMask<uint16_t, kWidth> Match(h2_t hash) const {
    auto match = _mm_set1_epi8(static_cast<char>(hash));
    BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
    result = BitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
    return result;
  }

  // Returns a bitmask representing the positions of empty slots.
  NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    // This only works because ctrl_t::kEmpty is -128.
    return NonIterableBitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
    return NonIterableBitMask<uint16_t, kWidth>(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  }

  // Returns the number of trailing empty or deleted elements in the group.
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return TrailingZeros(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_SSE2

#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
struct GroupAArch64Impl {
  static constexpr size_t kWidth = 8;

  explicit GroupAArch64Impl(const ctrl_t* pos) {
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  }

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    uint8x8_t dup = vdup_n_u8(hash);
    auto mask = vceq_u8(ctrl, dup);
    return BitMask<uint64_t, kWidth, 3>(
        vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
    // produced bitfield. We then count number of trailing zeros.
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
    // so we should be fine.
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
    auto x = slsbs & (mask >> 6);
    auto res = (x + midbs) | msbs;
    little_endian::Store64(dst, res);
  }

  uint8x8_t ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN

struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
                                                   msbs);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
                                                   msbs);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
    // kDeleted. We lower all other bits and count number of trailing zeros.
    constexpr uint64_t bits = 0x0101010101010101ULL;
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
                                 3);
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & msbs;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};

#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
using GroupEmptyOrDeleted = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
// For Aarch64, we use the portable implementation for counting and masking
// empty or deleted group elements. This is to avoid the latency of moving
// between data GPRs and Neon registers when it does not provide a benefit.
// Using Neon is profitable when we call Match(), but is not when we don't,
// which is the case when we do *EmptyOrDeleted operations. It is difficult to
// make a similar approach beneficial on other architectures such as x86 since
// they have much lower GPR <-> vector register transfer latency and 16-wide
// Groups.
using GroupEmptyOrDeleted = GroupPortableImpl;
#else
using Group = GroupPortableImpl;
using GroupEmptyOrDeleted = GroupPortableImpl;
#endif

// When there is an insertion with no reserved growth, we rehash with
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
// constant divided by capacity ensures that inserting N elements is still O(N)
// in the average case. Using the constant 16 means that we expect to rehash ~8
// times more often than when generations are disabled. We are adding expected
// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
inline size_t RehashProbabilityConstant() { return 16; }

class CommonFieldsGenerationInfoEnabled {
  // A sentinel value for reserved_growth_ indicating that we just ran out of
  // reserved growth on the last insertion. When reserve is called and then
  // insertions take place, reserved_growth_'s state machine is N, ..., 1,
  // kReservedGrowthJustRanOut, 0.
  static constexpr size_t kReservedGrowthJustRanOut =
      (std::numeric_limits<size_t>::max)();

 public:
  CommonFieldsGenerationInfoEnabled() = default;
  CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
      : reserved_growth_(that.reserved_growth_),
        reservation_size_(that.reservation_size_),
        generation_(that.generation_) {
    that.reserved_growth_ = 0;
    that.reservation_size_ = 0;
    that.generation_ = EmptyGeneration();
  }
  CommonFieldsGenerationInfoEnabled& operator=(
      CommonFieldsGenerationInfoEnabled&&) = default;

  // Whether we should rehash on insert in order to detect bugs of using invalid
  // references. We rehash on the first insertion after reserved_growth_ reaches
  // 0 after a call to reserve. We also do a rehash with low probability
  // whenever reserved_growth_ is zero.
  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
                                                 size_t capacity) const;
  // Similar to above, except that we don't depend on reserved_growth_.
  bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
                                               size_t capacity) const;
  void maybe_increment_generation_on_insert() {
    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;

    if (reserved_growth_ > 0) {
      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
    } else {
      increment_generation();
    }
  }
  void increment_generation() { *generation_ = NextGeneration(*generation_); }
  void reset_reserved_growth(size_t reservation, size_t size) {
    reserved_growth_ = reservation - size;
  }
  size_t reserved_growth() const { return reserved_growth_; }
  void set_reserved_growth(size_t r) { reserved_growth_ = r; }
  size_t reservation_size() const { return reservation_size_; }
  void set_reservation_size(size_t r) { reservation_size_ = r; }
  GenerationType generation() const { return *generation_; }
  void set_generation(GenerationType g) { *generation_ = g; }
  GenerationType* generation_ptr() const { return generation_; }
  void set_generation_ptr(GenerationType* g) { generation_ = g; }

 private:
  // The number of insertions remaining that are guaranteed to not rehash due to
  // a prior call to reserve. Note: we store reserved growth in addition to
  // reservation size because calls to erase() decrease size_ but don't decrease
  // reserved growth.
  size_t reserved_growth_ = 0;
  // The maximum argument to reserve() since the container was cleared. We need
  // to keep track of this, in addition to reserved growth, because we reset
  // reserved growth to this when erase(begin(), end()) is called.
  size_t reservation_size_ = 0;
  // Pointer to the generation counter, which is used to validate iterators and
  // is stored in the backing array between the control bytes and the slots.
  // Note that we can't store the generation inside the container itself and
  // keep a pointer to the container in the iterators because iterators must
  // remain valid when the container is moved.
  // Note: we could derive this pointer from the control pointer, but it makes
  // the code more complicated, and there's a benefit in having the sizes of
  // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
  // which is that tests are less likely to rely on the size remaining the same.
  GenerationType* generation_ = EmptyGeneration();
};

class CommonFieldsGenerationInfoDisabled {
 public:
  CommonFieldsGenerationInfoDisabled() = default;
  CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
      default;
  CommonFieldsGenerationInfoDisabled& operator=(
      CommonFieldsGenerationInfoDisabled&&) = default;

  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
    return false;
  }
  bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
    return false;
  }
  void maybe_increment_generation_on_insert() {}
  void increment_generation() {}
  void reset_reserved_growth(size_t, size_t) {}
  size_t reserved_growth() const { return 0; }
  void set_reserved_growth(size_t) {}
  size_t reservation_size() const { return 0; }
  void set_reservation_size(size_t) {}
  GenerationType generation() const { return 0; }
  void set_generation(GenerationType) {}
  GenerationType* generation_ptr() const { return nullptr; }
  void set_generation_ptr(GenerationType*) {}
};

class HashSetIteratorGenerationInfoEnabled {
 public:
  HashSetIteratorGenerationInfoEnabled() = default;
  explicit HashSetIteratorGenerationInfoEnabled(
      const GenerationType* generation_ptr)
      : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}

  GenerationType generation() const { return generation_; }
  void reset_generation() { generation_ = *generation_ptr_; }
  const GenerationType* generation_ptr() const { return generation_ptr_; }
  void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }

 private:
  const GenerationType* generation_ptr_ = EmptyGeneration();
  GenerationType generation_ = *generation_ptr_;
};

class HashSetIteratorGenerationInfoDisabled {
 public:
  HashSetIteratorGenerationInfoDisabled() = default;
  explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}

  GenerationType generation() const { return 0; }
  void reset_generation() {}
  const GenerationType* generation_ptr() const { return nullptr; }
  void set_generation_ptr(const GenerationType*) {}
};

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
#else
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
#endif

// Returns whether `n` is a valid capacity (i.e., number of slots).
//
// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }

// Computes the offset of the control bytes from the start of the backing
// allocation. infoz and growth_left are stored at the beginning of the
// backing array.
inline size_t ControlOffset(bool has_infoz) {
  return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
}

// Returns the number of "cloned control bytes".
//
// This is the number of control bytes that are present both at the beginning
// of the control byte array and at the end, such that we can create a
// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
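// For instance, with the 16-wide SSE2 group this is 15 cloned bytes, and with
// the 8-wide portable/NEON groups it is 7.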

// Given the capacity of a table, computes the offset (from the start of the
// backing allocation) of the generation counter (if it exists).
inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
  assert(IsValidCapacity(capacity));
  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
  return ControlOffset(has_infoz) + num_control_bytes;
}

// Given the capacity of a table, computes the offset (from the start of the
// backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
  assert(IsValidCapacity(capacity));
  return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
          slot_align - 1) &
         (~slot_align + 1);
}

// Given the capacity of a table, computes the total size of the backing
// array.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
                        bool has_infoz) {
  return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
}
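
// A rough worked layout (illustrative only; exact values depend on the build
// configuration): for capacity == 15, Group::kWidth == 16, 8-byte slot
// alignment, 64-bit size_t, generations disabled, and has_infoz == false:
//   ControlOffset()    == sizeof(size_t) == 8 (growth_left only)
//   control bytes      == 15 + 1 sentinel + 15 clones == 31
//   GenerationOffset() == 8 + 31 == 39
//   SlotOffset()       == 39 rounded up to 8-byte alignment == 40
//   AllocSize()        == 40 + 15 * slot_size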

// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
class CommonFields : public CommonFieldsGenerationInfo {
 public:
  CommonFields() = default;

  // Not copyable
  CommonFields(const CommonFields&) = delete;
  CommonFields& operator=(const CommonFields&) = delete;

  // Movable
  CommonFields(CommonFields&& that) = default;
  CommonFields& operator=(CommonFields&&) = default;

  ctrl_t* control() const { return control_; }
  void set_control(ctrl_t* c) { control_ = c; }
  void* backing_array_start() const {
    // growth_left (and maybe infoz) is stored before control bytes.
    assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
    return control() - ControlOffset(has_infoz());
  }

  // Note: we can't use slots() because Qt defines "slots" as a macro.
  void* slot_array() const { return slots_; }
  void set_slots(void* s) { slots_ = s; }

  // The number of filled slots.
  size_t size() const { return size_ >> HasInfozShift(); }
  void set_size(size_t s) {
    size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
  }
  void increment_size() {
    assert(size() < capacity());
    size_ += size_t{1} << HasInfozShift();
  }
  void decrement_size() {
    assert(size() > 0);
    size_ -= size_t{1} << HasInfozShift();
  }

  // The total number of available slots.
  size_t capacity() const { return capacity_; }
  void set_capacity(size_t c) {
    assert(c == 0 || IsValidCapacity(c));
    capacity_ = c;
  }

  // The number of slots we can still fill without needing to rehash.
  // This is stored in the heap allocation before the control bytes.
  size_t growth_left() const {
    const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
    return *gl_ptr;
  }
  void set_growth_left(size_t gl) {
    size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
    assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
    *gl_ptr = gl;
  }

  bool has_infoz() const {
    return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
  }
  void set_has_infoz(bool has_infoz) {
    size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
  }

  HashtablezInfoHandle infoz() {
    return has_infoz()
               ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
               : HashtablezInfoHandle();
  }
  void set_infoz(HashtablezInfoHandle infoz) {
    assert(has_infoz());
    *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
  }

  bool should_rehash_for_bug_detection_on_insert() const {
    return CommonFieldsGenerationInfo::
        should_rehash_for_bug_detection_on_insert(control(), capacity());
  }
  bool should_rehash_for_bug_detection_on_move() const {
    return CommonFieldsGenerationInfo::
        should_rehash_for_bug_detection_on_move(control(), capacity());
  }
  void maybe_increment_generation_on_move() {
    if (capacity() == 0) return;
    increment_generation();
  }
  void reset_reserved_growth(size_t reservation) {
    CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
  }

  // The size of the backing array allocation.
  size_t alloc_size(size_t slot_size, size_t slot_align) const {
    return AllocSize(capacity(), slot_size, slot_align, has_infoz());
  }

  // Returns the number of control bytes set to kDeleted. For testing only.
  size_t TombstonesCount() const {
    return static_cast<size_t>(
        std::count(control(), control() + capacity(), ctrl_t::kDeleted));
  }

 private:
  // We store the has_infoz bit in the lowest bit of size_.
  static constexpr size_t HasInfozShift() { return 1; }
  static constexpr size_t HasInfozMask() {
    return (size_t{1} << HasInfozShift()) - 1;
  }

  // TODO(b/182800944): Investigate removing some of these fields:
  // - control/slots can be derived from each other

  // The control bytes (and, also, a pointer near to the base of the backing
  // array).
  //
  // This contains `capacity + 1 + NumClonedBytes()` entries, even
  // when the table is empty (hence EmptyGroup).
  //
  // Note that growth_left is stored immediately before this pointer.
  ctrl_t* control_ = EmptyGroup();

  // The beginning of the slots, located at `SlotOffset()` bytes after
  // `control`. May be null for empty tables.
  void* slots_ = nullptr;

  // The number of slots in the backing array. This is always 2^N-1 for an
  // integer N. NOTE: we tried experimenting with compressing the capacity and
  // storing it together with size_: (a) using 6 bits to store the corresponding
  // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
  // size_ and storing size in the low bits. Both of these experiments were
  // regressions, presumably because we need capacity to do find operations.
  size_t capacity_ = 0;

  // The size (number of filled slots), with the low bit storing whether we
  // have infoz.
  size_t size_ = 0;
};

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;

// Returns the next valid capacity after `n`.
inline size_t NextCapacity(size_t n) {
  assert(IsValidCapacity(n) || n == 0);
  return n * 2 + 1;
}

// Applies the following mapping to every byte in the control array:
//   * kDeleted -> kEmpty
//   * kEmpty -> kEmpty
//   * _ -> kDeleted
// PRECONDITION:
//   IsValidCapacity(capacity)
//   ctrl[capacity] == ctrl_t::kSentinel
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);

// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {
  return n ? ~size_t{} >> countl_zero(n) : 1;
}
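
// A sketch of the mapping (assuming 64-bit size_t):
//   NormalizeCapacity(0)  == 1
//   NormalizeCapacity(1)  == 1
//   NormalizeCapacity(8)  == 15   // rounds up to the next 2^m - 1
//   NormalizeCapacity(15) == 15
//   NormalizeCapacity(16) == 31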

// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
//   average of two empty slots per group.
// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
//   never need to probe (the whole table fits in one group) so we don't need a
//   load factor less than 1.

// Given `capacity`, applies the load factor; i.e., it returns the maximum
// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
  assert(IsValidCapacity(capacity));
  // `capacity*7/8`
  if (Group::kWidth == 8 && capacity == 7) {
    // x-x/8 does not work when x==7.
    return 6;
  }
  return capacity - capacity / 8;
}

// Given `growth`, "unapplies" the load factor to find how large the capacity
// should be to stay within the load factor.
//
// This might not be a valid capacity and `NormalizeCapacity()` should be
// called on this.
inline size_t GrowthToLowerboundCapacity(size_t growth) {
  // `growth*8/7`
  if (Group::kWidth == 8 && growth == 7) {
    // x+(x-1)/7 does not work when x==7.
    return 8;
  }
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
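
// A small worked pair of values (illustrative only): with a 16-wide group,
//   CapacityToGrowth(15)           == 15 - 15 / 8 == 14
//   GrowthToLowerboundCapacity(14) == 14 + (14 - 1) / 7 == 15
// so reserving room for 14 elements maps back to a capacity of 15.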
|
|
|
|
template <class InputIter>
|
|
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
|
|
size_t bucket_count) {
|
|
if (bucket_count != 0) {
|
|
return bucket_count;
|
|
}
|
|
using InputIterCategory =
|
|
typename std::iterator_traits<InputIter>::iterator_category;
|
|
if (std::is_base_of<std::random_access_iterator_tag,
|
|
InputIterCategory>::value) {
|
|
return GrowthToLowerboundCapacity(
|
|
static_cast<size_t>(std::distance(first, last)));
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
constexpr bool SwisstableDebugEnabled() {
|
|
#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
|
|
ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
|
|
return true;
|
|
#else
|
|
return false;
|
|
#endif
|
|
}
|
|
|
|
inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
|
|
const GenerationType* generation_ptr,
|
|
const char* operation) {
|
|
if (!SwisstableDebugEnabled()) return;
|
|
// `SwisstableDebugEnabled()` is also true for release builds with hardening
|
|
// enabled. To minimize their impact in those builds:
|
|
// - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
|
|
// - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
|
|
// the chances that the hot paths will be inlined.
|
|
if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
|
|
ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
|
|
}
|
|
if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
|
|
ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
|
|
operation);
|
|
}
|
|
if (SwisstableGenerationsEnabled()) {
|
|
if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
|
|
ABSL_RAW_LOG(FATAL,
|
|
"%s called on invalid iterator. The table could have "
|
|
"rehashed or moved since this iterator was initialized.",
|
|
operation);
|
|
}
|
|
if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
|
|
ABSL_RAW_LOG(
|
|
FATAL,
|
|
"%s called on invalid iterator. The element was likely erased.",
|
|
operation);
|
|
}
|
|
} else {
|
|
if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
|
|
ABSL_RAW_LOG(
|
|
FATAL,
|
|
"%s called on invalid iterator. The element might have been erased "
|
|
"or the table might have rehashed. Consider running with "
|
|
"--config=asan to diagnose rehashing issues.",
|
|
operation);
|
|
}
|
|
}
|
|
}
|
|
|
|
// Note that for comparisons, null/end iterators are valid.
|
|
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
|
|
GenerationType generation,
|
|
const GenerationType* generation_ptr) {
|
|
if (!SwisstableDebugEnabled()) return;
|
|
const bool ctrl_is_valid_for_comparison =
|
|
ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
|
|
if (SwisstableGenerationsEnabled()) {
|
|
if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
|
|
ABSL_RAW_LOG(FATAL,
|
|
"Invalid iterator comparison. The table could have rehashed "
|
|
"or moved since this iterator was initialized.");
|
|
}
|
|
if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
|
|
ABSL_RAW_LOG(
|
|
FATAL, "Invalid iterator comparison. The element was likely erased.");
|
|
}
|
|
} else {
|
|
ABSL_HARDENING_ASSERT(
|
|
ctrl_is_valid_for_comparison &&
|
|
"Invalid iterator comparison. The element might have been erased or "
|
|
"the table might have rehashed. Consider running with --config=asan to "
|
|
"diagnose rehashing issues.");
|
|
}
|
|
}
|
|
|
|
// If the two iterators come from the same container, then their pointers will
|
|
// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa.
|
|
// Note: we take slots by reference so that it's not UB if they're uninitialized
|
|
// as long as we don't read them (when ctrl is null).
|
|
inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
|
|
const ctrl_t* ctrl_b,
|
|
const void* const& slot_a,
|
|
const void* const& slot_b) {
|
|
// If either control byte is null, then we can't tell.
|
|
if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
|
|
const void* low_slot = slot_a;
|
|
const void* hi_slot = slot_b;
|
|
if (ctrl_a > ctrl_b) {
|
|
std::swap(ctrl_a, ctrl_b);
|
|
std::swap(low_slot, hi_slot);
|
|
}
|
|
return ctrl_b < low_slot && low_slot <= hi_slot;
|
|
}
|
|
|
|
// Asserts that two iterators come from the same container.
// Note: we take slots by reference so that it's not UB if they're uninitialized
// as long as we don't read them (when ctrl is null).
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
                                const void* const& slot_a,
                                const void* const& slot_b,
                                const GenerationType* generation_ptr_a,
                                const GenerationType* generation_ptr_b) {
  if (!SwisstableDebugEnabled()) return;
  // `SwisstableDebugEnabled()` is also true for release builds with hardening
  // enabled. To minimize their impact in those builds:
  // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
  // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
  //   the chances that the hot paths will be inlined.
  const bool a_is_default = ctrl_a == EmptyGroup();
  const bool b_is_default = ctrl_b == EmptyGroup();
  if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
    ABSL_RAW_LOG(
        FATAL,
        "Invalid iterator comparison. Comparing default-constructed iterator "
        "with non-default-constructed iterator.");
  }
  if (a_is_default && b_is_default) return;

  if (SwisstableGenerationsEnabled()) {
    if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
    const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
    const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
    if (a_is_empty != b_is_empty) {
      ABSL_RAW_LOG(FATAL,
                   "Invalid iterator comparison. Comparing iterator from a "
                   "non-empty hashtable with an iterator from an empty "
                   "hashtable.");
    }
    if (a_is_empty && b_is_empty) {
      ABSL_RAW_LOG(FATAL,
                   "Invalid iterator comparison. Comparing iterators from "
                   "different empty hashtables.");
    }
    const bool a_is_end = ctrl_a == nullptr;
    const bool b_is_end = ctrl_b == nullptr;
    if (a_is_end || b_is_end) {
      ABSL_RAW_LOG(FATAL,
                   "Invalid iterator comparison. Comparing iterator with an "
                   "end() iterator from a different hashtable.");
    }
    ABSL_RAW_LOG(FATAL,
                 "Invalid iterator comparison. Comparing non-end() iterators "
                 "from different hashtables.");
  } else {
    ABSL_HARDENING_ASSERT(
        AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
        "Invalid iterator comparison. The iterators may be from different "
        "containers or the container might have rehashed or moved. Consider "
        "running with --config=asan to diagnose issues.");
  }
}
struct FindInfo {
  size_t offset;
  size_t probe_length;
};
// Whether a table is "small". A small table fits entirely into a probing
// group, i.e., has a capacity < `Group::kWidth`.
//
// In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// In small mode only the first `capacity` control bytes after the sentinel
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
// represent a real slot. This is important to take into account in
// `find_first_non_full()`, where we never try
// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
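
// For example (assuming 16-byte groups, so Group::kWidth == 16): valid
// capacities 1, 3, and 7 are "small", while 15 and above are not, since a
// 15-slot table plus its sentinel already fills a whole group.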
// Begins a probing operation on `common.control`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
                                      size_t hash) {
  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
  return probe(common.control(), common.capacity(), hash);
}
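
// A sketch of the resulting probe sequence (see `probe_seq` for the exact
// arithmetic): probing starts at H1(hash) masked down to the capacity and
// each step advances by one more group width than the last (offsets grow as
// triangular numbers), which visits every group exactly once for the
// power-of-two-minus-one capacities used by this table.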
// Probes an array of control bits using a probe sequence derived from `hash`,
// and returns the offset corresponding to the first deleted or empty slot.
//
// Behavior when the entire table is full is undefined.
//
// NOTE: this function must work with tables having both empty and deleted
// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
  auto seq = probe(common, hash);
  const ctrl_t* ctrl = common.control();
  while (true) {
    GroupEmptyOrDeleted g{ctrl + seq.offset()};
    auto mask = g.MaskEmptyOrDeleted();
    if (mask) {
#if !defined(NDEBUG)
      // We want to add entropy even when ASLR is not enabled.
      // In debug builds we will randomly insert in either the front or back of
      // the group.
      // TODO(kfm,sbenza): revisit after we do unconditional mixing
      if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
        return {seq.offset(mask.HighestBitSet()), seq.index()};
      }
#endif
      return {seq.offset(mask.LowestBitSet()), seq.index()};
    }
    seq.next();
    assert(seq.index() <= common.capacity() && "full table!");
  }
}
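
// Typical callers, both defined later in this file: `prepare_insert()` uses
// this to choose the slot for a newly inserted element, and `resize()` uses
// it to place each element into the freshly allocated backing array.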
// This extern template declaration keeps the possibility of inlining for the
// inline function above. When the compiler decides not to inline, no symbols
// will be added to the corresponding translation unit.
extern template FindInfo find_first_non_full(const CommonFields&, size_t);
// Non-inlined version of find_first_non_full for use in less
// performance critical routines.
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
inline void ResetGrowthLeft(CommonFields& common) {
  common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
}
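
// Note: `CapacityToGrowth()` intentionally leaves headroom below the raw
// capacity (in the current implementation roughly `capacity - capacity / 8`,
// i.e. a maximum load factor of about 7/8) so that probe sequences stay short.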
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
// array as empty.
inline void ResetCtrl(CommonFields& common, size_t slot_size) {
  const size_t capacity = common.capacity();
  ctrl_t* ctrl = common.control();
  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
              capacity + 1 + NumClonedBytes());
  ctrl[capacity] = ctrl_t::kSentinel;
  SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
  ResetGrowthLeft(common);
}
// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
                    size_t slot_size) {
  const size_t capacity = common.capacity();
  assert(i < capacity);

  auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
  if (IsFull(h)) {
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  } else {
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
  }

  ctrl_t* ctrl = common.control();
  ctrl[i] = h;
  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
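
// A sketch of the mirroring arithmetic above, assuming 16-byte groups so
// that NumClonedBytes() == 15, and recalling that `capacity` is always of
// the form 2^k - 1 and therefore acts as a bit mask:
//   capacity 63, i = 3:  ((3 - 15) & 63) + (15 & 63) == 52 + 15 == 67,
//                        i.e. the clone at index capacity + 1 + i.
//   capacity 63, i = 20: ((20 - 15) & 63) + 15 == 20, so the second store
//                        just rewrites ctrl[20]; only i < 15 has a clone.
//   capacity 7,  i = 3:  ((3 - 15) & 7) + (15 & 7) == 4 + 7 == 11 == 7 + 1 + 3.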
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
                    size_t slot_size) {
  SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
}
// growth_left (which is a size_t) is stored with the backing array.
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
  return (std::max)(align_of_slot, alignof(size_t));
}
|
|
|
|
template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
|
|
ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
|
|
assert(c.capacity());
|
|
// Folks with custom allocators often make unwarranted assumptions about the
|
|
// behavior of their classes vis-a-vis trivial destructability and what
|
|
// calls they will or won't make. Avoid sampling for people with custom
|
|
// allocators to get us out of this mess. This is not a hard guarantee but
|
|
// a workaround while we plan the exact guarantee we want to provide.
|
|
const size_t sample_size =
|
|
(std::is_same<Alloc, std::allocator<char>>::value &&
|
|
c.slot_array() == nullptr)
|
|
? SizeOfSlot
|
|
: 0;
|
|
HashtablezInfoHandle infoz =
|
|
sample_size > 0 ? Sample(sample_size) : c.infoz();
|
|
|
|
const bool has_infoz = infoz.IsSampled();
|
|
const size_t cap = c.capacity();
|
|
const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
|
|
char* mem = static_cast<char*>(
|
|
Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
|
|
const GenerationType old_generation = c.generation();
|
|
c.set_generation_ptr(reinterpret_cast<GenerationType*>(
|
|
mem + GenerationOffset(cap, has_infoz)));
|
|
c.set_generation(NextGeneration(old_generation));
|
|
c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
|
|
c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
|
|
ResetCtrl(c, SizeOfSlot);
|
|
c.set_has_infoz(has_infoz);
|
|
if (has_infoz) {
|
|
infoz.RecordStorageChanged(c.size(), cap);
|
|
c.set_infoz(infoz);
|
|
}
|
|
}
|
|
|
|
// PolicyFunctions bundles together some information for a particular
|
|
// raw_hash_set<T, ...> instantiation. This information is passed to
|
|
// type-erased functions that want to do small amounts of type-specific
|
|
// work.
|
|
struct PolicyFunctions {
|
|
size_t slot_size;
|
|
|
|
// Returns the hash of the pointed-to slot.
|
|
size_t (*hash_slot)(void* set, void* slot);
|
|
|
|
// Transfer the contents of src_slot to dst_slot.
|
|
void (*transfer)(void* set, void* dst_slot, void* src_slot);
|
|
|
|
// Deallocate the backing store from common.
|
|
void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
|
|
};
|
|
|
|
// ClearBackingArray clears the backing array, either modifying it in place,
|
|
// or creating a new one based on the value of "reuse".
|
|
// REQUIRES: c.capacity > 0
|
|
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
|
|
bool reuse);
|
|
|
|
// Type-erased version of raw_hash_set::erase_meta_only.
|
|
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
|
|
|
|
// Function to place in PolicyFunctions::dealloc for raw_hash_sets
|
|
// that are using std::allocator. This allows us to share the same
|
|
// function body for raw_hash_set instantiations that have the
|
|
// same slot alignment.
|
|
template <size_t AlignOfSlot>
|
|
ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
|
|
const PolicyFunctions& policy) {
|
|
// Unpoison before returning the memory to the allocator.
|
|
SanitizerUnpoisonMemoryRegion(common.slot_array(),
|
|
policy.slot_size * common.capacity());
|
|
|
|
std::allocator<char> alloc;
|
|
common.infoz().Unregister();
|
|
Deallocate<BackingArrayAlignment(AlignOfSlot)>(
|
|
&alloc, common.backing_array_start(),
|
|
common.alloc_size(policy.slot_size, AlignOfSlot));
|
|
}
|
|
|
|
// For trivially relocatable types we use memcpy directly. This allows us to
|
|
// share the same function body for raw_hash_set instantiations that have the
|
|
// same slot size as long as they are relocatable.
|
|
template <size_t SizeOfSlot>
|
|
ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
|
|
memcpy(dst, src, SizeOfSlot);
|
|
}
|
|
|
|
// Type-erased version of raw_hash_set::drop_deletes_without_resize.
|
|
void DropDeletesWithoutResize(CommonFields& common,
|
|
const PolicyFunctions& policy, void* tmp_space);
|
|
|
|
// A SwissTable.
|
|
//
|
|
// Policy: a policy defines how to perform different operations on
|
|
// the slots of the hashtable (see hash_policy_traits.h for the full interface
|
|
// of policy).
|
|
//
|
|
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
|
|
// functor should accept a key and return size_t as hash. For best performance
|
|
// it is important that the hash function provides high entropy across all bits
|
|
// of the hash.
|
|
//
|
|
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
|
|
// should accept two (of possibly different type) keys and return a bool: true
|
|
// if they are equal, false if they are not. If two keys compare equal, then
|
|
// their hash values as defined by Hash MUST be equal.
|
|
//
|
|
// Allocator: an Allocator
|
|
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
|
|
// the storage of the hashtable will be allocated and the elements will be
|
|
// constructed and destroyed.
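//
// For example, `absl::flat_hash_set` and `absl::flat_hash_map` are thin
// wrappers that instantiate this class with their own policies (see
// flat_hash_set.h and flat_hash_map.h): the policy supplies the slot layout
// and element construction, while this class owns probing, growth, and
// iteration.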
|
|
template <class Policy, class Hash, class Eq, class Alloc>
|
|
class raw_hash_set {
|
|
using PolicyTraits = hash_policy_traits<Policy>;
|
|
using KeyArgImpl =
|
|
KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
|
|
|
|
public:
|
|
using init_type = typename PolicyTraits::init_type;
|
|
using key_type = typename PolicyTraits::key_type;
|
|
// TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
|
|
// code fixes!
|
|
using slot_type = typename PolicyTraits::slot_type;
|
|
using allocator_type = Alloc;
|
|
using size_type = size_t;
|
|
using difference_type = ptrdiff_t;
|
|
using hasher = Hash;
|
|
using key_equal = Eq;
|
|
using policy_type = Policy;
|
|
using value_type = typename PolicyTraits::value_type;
|
|
using reference = value_type&;
|
|
using const_reference = const value_type&;
|
|
using pointer = typename absl::allocator_traits<
|
|
allocator_type>::template rebind_traits<value_type>::pointer;
|
|
using const_pointer = typename absl::allocator_traits<
|
|
allocator_type>::template rebind_traits<value_type>::const_pointer;
|
|
|
|
// Alias used for heterogeneous lookup functions.
|
|
// `key_arg<K>` evaluates to `K` when the functors are transparent and to
|
|
// `key_type` otherwise. It permits template argument deduction on `K` for the
|
|
// transparent case.
|
|
template <class K>
|
|
using key_arg = typename KeyArgImpl::template type<K, key_type>;
|
|
|
|
private:
|
|
// Give an early error when key_type is not hashable/eq.
|
|
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
|
|
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
|
|
|
|
using AllocTraits = absl::allocator_traits<allocator_type>;
|
|
using SlotAlloc = typename absl::allocator_traits<
|
|
allocator_type>::template rebind_alloc<slot_type>;
|
|
using SlotAllocTraits = typename absl::allocator_traits<
|
|
allocator_type>::template rebind_traits<slot_type>;
|
|
|
|
static_assert(std::is_lvalue_reference<reference>::value,
|
|
"Policy::element() must return a reference");
|
|
|
|
template <typename T>
|
|
struct SameAsElementReference
|
|
: std::is_same<typename std::remove_cv<
|
|
typename std::remove_reference<reference>::type>::type,
|
|
typename std::remove_cv<
|
|
typename std::remove_reference<T>::type>::type> {};
|
|
|
|
// An enabler for insert(T&&): T must be convertible to init_type or be the
|
|
// same as [cv] value_type [ref].
|
|
// Note: we separate SameAsElementReference into its own type to avoid using
|
|
// reference unless we need to. MSVC doesn't seem to like it in some
|
|
// cases.
|
|
template <class T>
|
|
using RequiresInsertable = typename std::enable_if<
|
|
absl::disjunction<std::is_convertible<T, init_type>,
|
|
SameAsElementReference<T>>::value,
|
|
int>::type;
|
|
|
|
// RequiresNotInit is a workaround for gcc prior to 7.1.
|
|
// See https://godbolt.org/g/Y4xsUh.
|
|
template <class T>
|
|
using RequiresNotInit =
|
|
typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
|
|
|
|
template <class... Ts>
|
|
using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
|
|
|
|
public:
|
|
static_assert(std::is_same<pointer, value_type*>::value,
|
|
"Allocators with custom pointer types are not supported");
|
|
static_assert(std::is_same<const_pointer, const value_type*>::value,
|
|
"Allocators with custom pointer types are not supported");
|
|
|
|
class iterator : private HashSetIteratorGenerationInfo {
|
|
friend class raw_hash_set;
|
|
|
|
public:
|
|
using iterator_category = std::forward_iterator_tag;
|
|
using value_type = typename raw_hash_set::value_type;
|
|
using reference =
|
|
absl::conditional_t<PolicyTraits::constant_iterators::value,
|
|
const value_type&, value_type&>;
|
|
using pointer = absl::remove_reference_t<reference>*;
|
|
using difference_type = typename raw_hash_set::difference_type;
|
|
|
|
iterator() {}
|
|
|
|
// PRECONDITION: not an end() iterator.
|
|
reference operator*() const {
|
|
AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
|
|
return PolicyTraits::element(slot_);
|
|
}
|
|
|
|
// PRECONDITION: not an end() iterator.
|
|
pointer operator->() const {
|
|
AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
|
|
return &operator*();
|
|
}
|
|
|
|
// PRECONDITION: not an end() iterator.
|
|
iterator& operator++() {
|
|
AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
|
|
++ctrl_;
|
|
++slot_;
|
|
skip_empty_or_deleted();
|
|
return *this;
|
|
}
|
|
// PRECONDITION: not an end() iterator.
|
|
iterator operator++(int) {
|
|
auto tmp = *this;
|
|
++*this;
|
|
return tmp;
|
|
}
|
|
|
|
friend bool operator==(const iterator& a, const iterator& b) {
|
|
AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
|
|
AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
|
|
AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
|
|
a.generation_ptr(), b.generation_ptr());
|
|
return a.ctrl_ == b.ctrl_;
|
|
}
|
|
friend bool operator!=(const iterator& a, const iterator& b) {
|
|
return !(a == b);
|
|
}
|
|
|
|
private:
|
|
iterator(ctrl_t* ctrl, slot_type* slot,
|
|
const GenerationType* generation_ptr)
|
|
: HashSetIteratorGenerationInfo(generation_ptr),
|
|
ctrl_(ctrl),
|
|
slot_(slot) {
|
|
// This assumption helps the compiler know that any non-end iterator is
|
|
// not equal to any end iterator.
|
|
ABSL_ASSUME(ctrl != nullptr);
|
|
}
|
|
// For end() iterators.
|
|
explicit iterator(const GenerationType* generation_ptr)
|
|
: HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
|
|
|
|
    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
    // until they reach one.
|
|
//
|
|
// If a sentinel is reached, we null `ctrl_` out instead.
|
|
void skip_empty_or_deleted() {
|
|
while (IsEmptyOrDeleted(*ctrl_)) {
|
|
uint32_t shift =
|
|
GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
|
|
ctrl_ += shift;
|
|
slot_ += shift;
|
|
}
|
|
if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
|
|
}
|
|
|
|
ctrl_t* control() const { return ctrl_; }
|
|
slot_type* slot() const { return slot_; }
|
|
|
|
// We use EmptyGroup() for default-constructed iterators so that they can
|
|
// be distinguished from end iterators, which have nullptr ctrl_.
|
|
ctrl_t* ctrl_ = EmptyGroup();
|
|
// To avoid uninitialized member warnings, put slot_ in an anonymous union.
|
|
// The member is not initialized on singleton and end iterators.
|
|
union {
|
|
slot_type* slot_;
|
|
};
|
|
};
|
|
|
|
class const_iterator {
|
|
friend class raw_hash_set;
|
|
template <class Container, typename Enabler>
|
|
friend struct absl::container_internal::hashtable_debug_internal::
|
|
HashtableDebugAccess;
|
|
|
|
public:
|
|
using iterator_category = typename iterator::iterator_category;
|
|
using value_type = typename raw_hash_set::value_type;
|
|
using reference = typename raw_hash_set::const_reference;
|
|
using pointer = typename raw_hash_set::const_pointer;
|
|
using difference_type = typename raw_hash_set::difference_type;
|
|
|
|
const_iterator() = default;
|
|
// Implicit construction from iterator.
|
|
const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
|
|
|
|
reference operator*() const { return *inner_; }
|
|
pointer operator->() const { return inner_.operator->(); }
|
|
|
|
const_iterator& operator++() {
|
|
++inner_;
|
|
return *this;
|
|
}
|
|
const_iterator operator++(int) { return inner_++; }
|
|
|
|
friend bool operator==(const const_iterator& a, const const_iterator& b) {
|
|
return a.inner_ == b.inner_;
|
|
}
|
|
friend bool operator!=(const const_iterator& a, const const_iterator& b) {
|
|
return !(a == b);
|
|
}
|
|
|
|
private:
|
|
const_iterator(const ctrl_t* ctrl, const slot_type* slot,
|
|
const GenerationType* gen)
|
|
: inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
|
|
}
|
|
ctrl_t* control() const { return inner_.control(); }
|
|
slot_type* slot() const { return inner_.slot(); }
|
|
|
|
iterator inner_;
|
|
};
|
|
|
|
using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
|
|
using insert_return_type = InsertReturnType<iterator, node_type>;
|
|
|
|
// Note: can't use `= default` due to non-default noexcept (causes
|
|
// problems for some compilers). NOLINTNEXTLINE
|
|
raw_hash_set() noexcept(
|
|
std::is_nothrow_default_constructible<hasher>::value &&
|
|
std::is_nothrow_default_constructible<key_equal>::value &&
|
|
std::is_nothrow_default_constructible<allocator_type>::value) {}
|
|
|
|
ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
|
|
size_t bucket_count, const hasher& hash = hasher(),
|
|
const key_equal& eq = key_equal(),
|
|
const allocator_type& alloc = allocator_type())
|
|
: settings_(CommonFields{}, hash, eq, alloc) {
|
|
if (bucket_count) {
|
|
common().set_capacity(NormalizeCapacity(bucket_count));
|
|
initialize_slots();
|
|
}
|
|
}
|
|
|
|
raw_hash_set(size_t bucket_count, const hasher& hash,
|
|
const allocator_type& alloc)
|
|
: raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
|
|
|
|
raw_hash_set(size_t bucket_count, const allocator_type& alloc)
|
|
: raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
|
|
|
|
explicit raw_hash_set(const allocator_type& alloc)
|
|
: raw_hash_set(0, hasher(), key_equal(), alloc) {}
|
|
|
|
template <class InputIter>
|
|
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
|
|
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
|
|
const allocator_type& alloc = allocator_type())
|
|
: raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
|
|
hash, eq, alloc) {
|
|
insert(first, last);
|
|
}
|
|
|
|
template <class InputIter>
|
|
raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
|
|
const hasher& hash, const allocator_type& alloc)
|
|
: raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
|
|
|
|
template <class InputIter>
|
|
raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
|
|
const allocator_type& alloc)
|
|
: raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
|
|
|
|
template <class InputIter>
|
|
raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
|
|
: raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
|
|
|
|
// Instead of accepting std::initializer_list<value_type> as the first
|
|
// argument like std::unordered_set<value_type> does, we have two overloads
|
|
// that accept std::initializer_list<T> and std::initializer_list<init_type>.
|
|
// This is advantageous for performance.
|
|
//
|
|
// // Turns {"abc", "def"} into std::initializer_list<std::string>, then
|
|
// // copies the strings into the set.
|
|
// std::unordered_set<std::string> s = {"abc", "def"};
|
|
//
|
|
// // Turns {"abc", "def"} into std::initializer_list<const char*>, then
|
|
// // copies the strings into the set.
|
|
// absl::flat_hash_set<std::string> s = {"abc", "def"};
|
|
//
|
|
// The same trick is used in insert().
|
|
//
|
|
// The enabler is necessary to prevent this constructor from triggering where
|
|
// the copy constructor is meant to be called.
|
|
//
|
|
// absl::flat_hash_set<int> a, b{a};
|
|
//
|
|
// RequiresNotInit<T> is a workaround for gcc prior to 7.1.
|
|
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
|
|
raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
|
|
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
|
|
const allocator_type& alloc = allocator_type())
|
|
: raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
|
|
|
|
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
|
|
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
|
|
const allocator_type& alloc = allocator_type())
|
|
: raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
|
|
|
|
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
|
|
raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
|
|
const hasher& hash, const allocator_type& alloc)
|
|
: raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
|
|
|
|
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
|
|
const hasher& hash, const allocator_type& alloc)
|
|
: raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
|
|
|
|
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
|
|
raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
|
|
const allocator_type& alloc)
|
|
: raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
|
|
|
|
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
|
|
const allocator_type& alloc)
|
|
: raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
|
|
|
|
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
|
|
raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
|
|
: raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
|
|
|
|
raw_hash_set(std::initializer_list<init_type> init,
|
|
const allocator_type& alloc)
|
|
: raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
|
|
|
|
raw_hash_set(const raw_hash_set& that)
|
|
: raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
|
|
that.alloc_ref())) {}
|
|
|
|
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
|
|
: raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
|
|
const size_t size = that.size();
|
|
if (size == 0) return;
|
|
reserve(size);
|
|
// Because the table is guaranteed to be empty, we can do something faster
|
|
// than a full `insert`.
|
|
for (const auto& v : that) {
|
|
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
|
|
auto target = find_first_non_full_outofline(common(), hash);
|
|
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
|
|
emplace_at(target.offset, v);
|
|
common().maybe_increment_generation_on_insert();
|
|
infoz().RecordInsert(hash, target.probe_length);
|
|
}
|
|
common().set_size(size);
|
|
set_growth_left(growth_left() - size);
|
|
}
|
|
|
|
ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
|
|
std::is_nothrow_copy_constructible<hasher>::value &&
|
|
std::is_nothrow_copy_constructible<key_equal>::value &&
|
|
std::is_nothrow_copy_constructible<allocator_type>::value)
|
|
: // Hash, equality and allocator are copied instead of moved because
|
|
// `that` must be left valid. If Hash is std::function<Key>, moving it
|
|
// would create a nullptr functor that cannot be called.
|
|
// TODO(b/296061262): move instead of copying hash/eq/alloc.
|
|
// Note: we avoid using exchange for better generated code.
|
|
settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
|
|
that.alloc_ref()) {
|
|
that.common() = CommonFields{};
|
|
maybe_increment_generation_or_rehash_on_move();
|
|
}
|
|
|
|
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
|
|
: settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
|
|
if (a == that.alloc_ref()) {
|
|
std::swap(common(), that.common());
|
|
maybe_increment_generation_or_rehash_on_move();
|
|
} else {
|
|
move_elements_allocs_unequal(std::move(that));
|
|
}
|
|
}
|
|
|
|
raw_hash_set& operator=(const raw_hash_set& that) {
|
|
if (ABSL_PREDICT_FALSE(this == &that)) return *this;
|
|
constexpr bool propagate_alloc =
|
|
AllocTraits::propagate_on_container_copy_assignment::value;
|
|
// TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
|
|
// is an exact match for that.size(). If this->capacity() is too big, then
|
|
// it would make iteration very slow to reuse the allocation. Maybe we can
|
|
// do the same heuristic as clear() and reuse if it's small enough.
|
|
raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
|
|
// NOLINTNEXTLINE: not returning *this for performance.
|
|
return assign_impl<propagate_alloc>(std::move(tmp));
|
|
}
|
|
|
|
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
|
|
absl::allocator_traits<allocator_type>::is_always_equal::value &&
|
|
std::is_nothrow_move_assignable<hasher>::value &&
|
|
std::is_nothrow_move_assignable<key_equal>::value) {
|
|
// TODO(sbenza): We should only use the operations from the noexcept clause
|
|
// to make sure we actually adhere to that contract.
|
|
// NOLINTNEXTLINE: not returning *this for performance.
|
|
return move_assign(
|
|
std::move(that),
|
|
typename AllocTraits::propagate_on_container_move_assignment());
|
|
}
|
|
|
|
~raw_hash_set() { destructor_impl(); }
|
|
|
|
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto it = iterator_at(0);
|
|
it.skip_empty_or_deleted();
|
|
return it;
|
|
}
|
|
iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return iterator(common().generation_ptr());
|
|
}
|
|
|
|
const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return const_cast<raw_hash_set*>(this)->begin();
|
|
}
|
|
const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return iterator(common().generation_ptr());
|
|
}
|
|
const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return begin();
|
|
}
|
|
const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
|
|
|
|
bool empty() const { return !size(); }
|
|
size_t size() const { return common().size(); }
|
|
size_t capacity() const { return common().capacity(); }
|
|
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
|
|
|
|
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
|
|
// Iterating over this container is O(bucket_count()). When bucket_count()
|
|
// is much greater than size(), iteration becomes prohibitively expensive.
|
|
// For clear() it is more important to reuse the allocated array when the
|
|
// container is small because allocation takes comparatively long time
|
|
// compared to destruction of the elements of the container. So we pick the
|
|
// largest bucket_count() threshold for which iteration is still fast and
|
|
// past that we simply deallocate the array.
|
|
const size_t cap = capacity();
|
|
if (cap == 0) {
|
|
// Already guaranteed to be empty; so nothing to do.
|
|
} else {
|
|
destroy_slots();
|
|
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
|
|
}
|
|
common().set_reserved_growth(0);
|
|
common().set_reservation_size(0);
|
|
}
|
|
|
|
// This overload kicks in when the argument is an rvalue of insertable and
|
|
// decomposable type other than init_type.
|
|
//
|
|
// flat_hash_map<std::string, int> m;
|
|
// m.insert(std::make_pair("abc", 42));
|
|
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
|
|
// bug.
|
|
template <class T, RequiresInsertable<T> = 0, class T2 = T,
|
|
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
|
|
T* = nullptr>
|
|
std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return emplace(std::forward<T>(value));
|
|
}
|
|
|
|
// This overload kicks in when the argument is a bitfield or an lvalue of
|
|
// insertable and decomposable type.
|
|
//
|
|
// union { int n : 1; };
|
|
// flat_hash_set<int> s;
|
|
// s.insert(n);
|
|
//
|
|
// flat_hash_set<std::string> s;
|
|
// const char* p = "hello";
|
|
// s.insert(p);
|
|
//
|
|
template <
|
|
class T, RequiresInsertable<const T&> = 0,
|
|
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
|
|
std::pair<iterator, bool> insert(const T& value)
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return emplace(value);
|
|
}
|
|
|
|
// This overload kicks in when the argument is an rvalue of init_type. Its
|
|
// purpose is to handle brace-init-list arguments.
|
|
//
|
|
// flat_hash_map<std::string, int> s;
|
|
// s.insert({"abc", 42});
|
|
std::pair<iterator, bool> insert(init_type&& value)
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return emplace(std::move(value));
|
|
}
|
|
|
|
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
|
|
// bug.
|
|
template <class T, RequiresInsertable<T> = 0, class T2 = T,
|
|
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
|
|
T* = nullptr>
|
|
iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return insert(std::forward<T>(value)).first;
|
|
}
|
|
|
|
template <
|
|
class T, RequiresInsertable<const T&> = 0,
|
|
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
|
|
iterator insert(const_iterator,
|
|
const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return insert(value).first;
|
|
}
|
|
|
|
iterator insert(const_iterator,
|
|
init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return insert(std::move(value)).first;
|
|
}
|
|
|
|
template <class InputIt>
|
|
void insert(InputIt first, InputIt last) {
|
|
for (; first != last; ++first) emplace(*first);
|
|
}
|
|
|
|
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
|
|
void insert(std::initializer_list<T> ilist) {
|
|
insert(ilist.begin(), ilist.end());
|
|
}
|
|
|
|
void insert(std::initializer_list<init_type> ilist) {
|
|
insert(ilist.begin(), ilist.end());
|
|
}
|
|
|
|
insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
if (!node) return {end(), false, node_type()};
|
|
const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
|
|
auto res = PolicyTraits::apply(
|
|
InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
|
|
elem);
|
|
if (res.second) {
|
|
CommonAccess::Reset(&node);
|
|
return {res.first, true, node_type()};
|
|
} else {
|
|
return {res.first, false, std::move(node)};
|
|
}
|
|
}
|
|
|
|
iterator insert(const_iterator,
|
|
node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto res = insert(std::move(node));
|
|
node = std::move(res.node);
|
|
return res.position;
|
|
}
|
|
|
|
// This overload kicks in if we can deduce the key from args. This enables us
|
|
// to avoid constructing value_type if an entry with the same key already
|
|
// exists.
|
|
//
|
|
// For example:
|
|
//
|
|
// flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
|
|
// // Creates no std::string copies and makes no heap allocations.
|
|
// m.emplace("abc", "xyz");
|
|
template <class... Args, typename std::enable_if<
|
|
IsDecomposable<Args...>::value, int>::type = 0>
|
|
std::pair<iterator, bool> emplace(Args&&... args)
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return PolicyTraits::apply(EmplaceDecomposable{*this},
|
|
std::forward<Args>(args)...);
|
|
}
|
|
|
|
  // This overload kicks in if we cannot deduce the key from args. It constructs
  // value_type unconditionally and then either moves it into the table or
  // destroys it.
|
|
template <class... Args, typename std::enable_if<
|
|
!IsDecomposable<Args...>::value, int>::type = 0>
|
|
std::pair<iterator, bool> emplace(Args&&... args)
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
|
|
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
|
|
|
|
construct(slot, std::forward<Args>(args)...);
|
|
const auto& elem = PolicyTraits::element(slot);
|
|
return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
|
|
}
|
|
|
|
template <class... Args>
|
|
iterator emplace_hint(const_iterator,
|
|
Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return emplace(std::forward<Args>(args)...).first;
|
|
}
|
|
|
|
// Extension API: support for lazy emplace.
|
|
//
|
|
// Looks up key in the table. If found, returns the iterator to the element.
|
|
// Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
|
|
// and returns an iterator to the new element.
|
|
//
|
|
// `f` must abide by several restrictions:
|
|
// - it MUST call `raw_hash_set::constructor` with arguments as if a
|
|
// `raw_hash_set::value_type` is constructed,
|
|
// - it MUST NOT access the container before the call to
|
|
// `raw_hash_set::constructor`, and
|
|
// - it MUST NOT erase the lazily emplaced element.
|
|
// Doing any of these is undefined behavior.
|
|
//
|
|
// For example:
|
|
//
|
|
  //   std::unordered_set<ArenaString> s;
  //   // Makes ArenaString even if "abc" is already in the set.
  //   s.insert(ArenaString(&arena, "abc"));
  //
  //   flat_hash_set<ArenaString> s;
  //   // Makes ArenaString only if "abc" is not in the set.
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
  //     ctor(&arena, "abc");
  //   });
|
|
//
|
|
// WARNING: This API is currently experimental. If there is a way to implement
|
|
// the same thing with the rest of the API, prefer that.
|
|
class constructor {
|
|
friend class raw_hash_set;
|
|
|
|
public:
|
|
template <class... Args>
|
|
void operator()(Args&&... args) const {
|
|
assert(*slot_);
|
|
PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
|
|
*slot_ = nullptr;
|
|
}
|
|
|
|
private:
|
|
constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
|
|
|
|
allocator_type* alloc_;
|
|
slot_type** slot_;
|
|
};
|
|
|
|
template <class K = key_type, class F>
|
|
iterator lazy_emplace(const key_arg<K>& key,
|
|
F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto res = find_or_prepare_insert(key);
|
|
if (res.second) {
|
|
slot_type* slot = slot_array() + res.first;
|
|
std::forward<F>(f)(constructor(&alloc_ref(), &slot));
|
|
assert(!slot);
|
|
}
|
|
return iterator_at(res.first);
|
|
}
|
|
|
|
// Extension API: support for heterogeneous keys.
|
|
//
|
|
// std::unordered_set<std::string> s;
|
|
// // Turns "abc" into std::string.
|
|
// s.erase("abc");
|
|
//
|
|
// flat_hash_set<std::string> s;
|
|
// // Uses "abc" directly without copying it into std::string.
|
|
// s.erase("abc");
|
|
template <class K = key_type>
|
|
size_type erase(const key_arg<K>& key) {
|
|
auto it = find(key);
|
|
if (it == end()) return 0;
|
|
erase(it);
|
|
return 1;
|
|
}
|
|
|
|
// Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
|
|
// this method returns void to reduce algorithmic complexity to O(1). The
|
|
// iterator is invalidated, so any increment should be done before calling
|
|
// erase. In order to erase while iterating across a map, use the following
|
|
// idiom (which also works for standard containers):
|
|
//
|
|
// for (auto it = m.begin(), end = m.end(); it != end;) {
|
|
// // `erase()` will invalidate `it`, so advance `it` first.
|
|
// auto copy_it = it++;
|
|
// if (<pred>) {
|
|
// m.erase(copy_it);
|
|
// }
|
|
// }
|
|
void erase(const_iterator cit) { erase(cit.inner_); }
|
|
|
|
// This overload is necessary because otherwise erase<K>(const K&) would be
|
|
// a better match if non-const iterator is passed as an argument.
|
|
void erase(iterator it) {
|
|
AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
|
|
destroy(it.slot());
|
|
erase_meta_only(it);
|
|
}
|
|
|
|
iterator erase(const_iterator first,
|
|
const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
// We check for empty first because ClearBackingArray requires that
|
|
// capacity() > 0 as a precondition.
|
|
if (empty()) return end();
|
|
if (first == begin() && last == end()) {
|
|
// TODO(ezb): we access control bytes in destroy_slots so it could make
|
|
// sense to combine destroy_slots and ClearBackingArray to avoid cache
|
|
// misses when the table is large. Note that we also do this in clear().
|
|
destroy_slots();
|
|
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
|
|
common().set_reserved_growth(common().reservation_size());
|
|
return end();
|
|
}
|
|
while (first != last) {
|
|
erase(first++);
|
|
}
|
|
return last.inner_;
|
|
}
|
|
|
|
// Moves elements from `src` into `this`.
|
|
// If the element already exists in `this`, it is left unmodified in `src`.
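  //
  // For example (a sketch of the resulting state):
  //
  //   absl::flat_hash_set<int> dst = {1, 2};
  //   absl::flat_hash_set<int> src = {2, 3};
  //   dst.merge(src);
  //   // dst == {1, 2, 3}; src == {2}, since the duplicate 2 stays in src.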
|
|
template <typename H, typename E>
|
|
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
|
|
assert(this != &src);
|
|
for (auto it = src.begin(), e = src.end(); it != e;) {
|
|
auto next = std::next(it);
|
|
if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
|
|
PolicyTraits::element(it.slot()))
|
|
.second) {
|
|
src.erase_meta_only(it);
|
|
}
|
|
it = next;
|
|
}
|
|
}
|
|
|
|
template <typename H, typename E>
|
|
void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
|
|
merge(src);
|
|
}
|
|
|
|
node_type extract(const_iterator position) {
|
|
AssertIsFull(position.control(), position.inner_.generation(),
|
|
position.inner_.generation_ptr(), "extract()");
|
|
auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
|
|
erase_meta_only(position);
|
|
return node;
|
|
}
|
|
|
|
template <
|
|
class K = key_type,
|
|
typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
|
|
node_type extract(const key_arg<K>& key) {
|
|
auto it = find(key);
|
|
return it == end() ? node_type() : extract(const_iterator{it});
|
|
}
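
  // Example of moving an element between two containers without copying it
  // (a usage sketch; `empty()` is part of the node handle API):
  //
  //   absl::flat_hash_set<std::string> src = {"abc"};
  //   absl::flat_hash_set<std::string> dst;
  //   auto node = src.extract("abc");
  //   if (!node.empty()) dst.insert(std::move(node));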
|
|
|
|
void swap(raw_hash_set& that) noexcept(
|
|
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
|
|
IsNoThrowSwappable<allocator_type>(
|
|
typename AllocTraits::propagate_on_container_swap{})) {
|
|
using std::swap;
|
|
swap(common(), that.common());
|
|
swap(hash_ref(), that.hash_ref());
|
|
swap(eq_ref(), that.eq_ref());
|
|
SwapAlloc(alloc_ref(), that.alloc_ref(),
|
|
typename AllocTraits::propagate_on_container_swap{});
|
|
}
|
|
|
|
void rehash(size_t n) {
|
|
if (n == 0 && capacity() == 0) return;
|
|
if (n == 0 && size() == 0) {
|
|
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
|
|
return;
|
|
}
|
|
|
|
// bitor is a faster way of doing `max` here. We will round up to the next
|
|
// power-of-2-minus-1, so bitor is good enough.
|
|
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
|
|
// n == 0 unconditionally rehashes as per the standard.
|
|
if (n == 0 || m > capacity()) {
|
|
resize(m);
|
|
|
|
// This is after resize, to ensure that we have completed the allocation
|
|
// and have potentially sampled the hashtable.
|
|
infoz().RecordReservation(n);
|
|
}
|
|
}
|
|
|
|
void reserve(size_t n) {
|
|
if (n > size() + growth_left()) {
|
|
size_t m = GrowthToLowerboundCapacity(n);
|
|
resize(NormalizeCapacity(m));
|
|
|
|
// This is after resize, to ensure that we have completed the allocation
|
|
// and have potentially sampled the hashtable.
|
|
infoz().RecordReservation(n);
|
|
}
|
|
common().reset_reserved_growth(n);
|
|
common().set_reservation_size(n);
|
|
}
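
  // Example: reserving up front performs at most one allocation and avoids
  // rehashing during the subsequent insertions (a usage sketch):
  //
  //   absl::flat_hash_set<int> s;
  //   s.reserve(1000);
  //   for (int i = 0; i < 1000; ++i) s.insert(i);  // no rehash expected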
|
|
|
|
// Extension API: support for heterogeneous keys.
|
|
//
|
|
// std::unordered_set<std::string> s;
|
|
// // Turns "abc" into std::string.
|
|
// s.count("abc");
|
|
//
|
|
// ch_set<std::string> s;
|
|
// // Uses "abc" directly without copying it into std::string.
|
|
// s.count("abc");
|
|
template <class K = key_type>
|
|
size_t count(const key_arg<K>& key) const {
|
|
return find(key) == end() ? 0 : 1;
|
|
}
|
|
|
|
  // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key. Like all lookup functions, this supports heterogeneous keys.
  //
  // NOTE: This is a very low level operation and should not be used without
  // specific benchmarks indicating its importance.
|
|
template <class K = key_type>
|
|
void prefetch(const key_arg<K>& key) const {
|
|
(void)key;
|
|
// Avoid probing if we won't be able to prefetch the addresses received.
|
|
#ifdef ABSL_HAVE_PREFETCH
|
|
prefetch_heap_block();
|
|
auto seq = probe(common(), hash_ref()(key));
|
|
PrefetchToLocalCache(control() + seq.offset());
|
|
PrefetchToLocalCache(slot_array() + seq.offset());
|
|
#endif // ABSL_HAVE_PREFETCH
|
|
}
|
|
|
|
  // The API of find() has two extensions.
  //
  // 1. The hash can be passed by the user. It must be equal to the hash of the
  // key.
  //
  // 2. The type of the key argument doesn't have to be key_type. This is
  // so-called heterogeneous key support.
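  //
  // For example, a caller that has already computed the hash with this
  // table's hasher can pass it in to avoid hashing twice (a usage sketch):
  //
  //   const size_t hash = s.hash_function()(key);
  //   auto it = s.find(key, hash);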
|
|
template <class K = key_type>
|
|
iterator find(const key_arg<K>& key,
|
|
size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto seq = probe(common(), hash);
|
|
slot_type* slot_ptr = slot_array();
|
|
const ctrl_t* ctrl = control();
|
|
while (true) {
|
|
Group g{ctrl + seq.offset()};
|
|
for (uint32_t i : g.Match(H2(hash))) {
|
|
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
|
|
EqualElement<K>{key, eq_ref()},
|
|
PolicyTraits::element(slot_ptr + seq.offset(i)))))
|
|
return iterator_at(seq.offset(i));
|
|
}
|
|
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
|
|
seq.next();
|
|
assert(seq.index() <= capacity() && "full table!");
|
|
}
|
|
}
|
|
template <class K = key_type>
|
|
iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
prefetch_heap_block();
|
|
return find(key, hash_ref()(key));
|
|
}
|
|
|
|
template <class K = key_type>
|
|
const_iterator find(const key_arg<K>& key,
|
|
size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
return const_cast<raw_hash_set*>(this)->find(key, hash);
|
|
}
|
|
template <class K = key_type>
|
|
const_iterator find(const key_arg<K>& key) const
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
prefetch_heap_block();
|
|
return find(key, hash_ref()(key));
|
|
}
|
|
|
|
template <class K = key_type>
|
|
bool contains(const key_arg<K>& key) const {
|
|
return find(key) != end();
|
|
}
|
|
|
|
template <class K = key_type>
|
|
std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
|
|
ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto it = find(key);
|
|
if (it != end()) return {it, std::next(it)};
|
|
return {it, it};
|
|
}
|
|
template <class K = key_type>
|
|
std::pair<const_iterator, const_iterator> equal_range(
|
|
const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
|
|
auto it = find(key);
|
|
if (it != end()) return {it, std::next(it)};
|
|
return {it, it};
|
|
}
|
|
|
|
size_t bucket_count() const { return capacity(); }
|
|
float load_factor() const {
|
|
return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
|
|
}
|
|
float max_load_factor() const { return 1.0f; }
|
|
void max_load_factor(float) {
|
|
// Does nothing.
|
|
}
|
|
|
|
hasher hash_function() const { return hash_ref(); }
|
|
key_equal key_eq() const { return eq_ref(); }
|
|
allocator_type get_allocator() const { return alloc_ref(); }
|
|
|
|
friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
|
|
if (a.size() != b.size()) return false;
|
|
const raw_hash_set* outer = &a;
|
|
const raw_hash_set* inner = &b;
|
|
if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
|
|
for (const value_type& elem : *outer) {
|
|
auto it = PolicyTraits::apply(FindElement{*inner}, elem);
|
|
if (it == inner->end() || !(*it == elem)) return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
|
|
return !(a == b);
|
|
}
|
|
|
|
template <typename H>
|
|
friend typename std::enable_if<H::template is_hashable<value_type>::value,
|
|
H>::type
|
|
AbslHashValue(H h, const raw_hash_set& s) {
|
|
return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
|
|
s.size());
|
|
}
|
|
|
|
friend void swap(raw_hash_set& a,
|
|
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
|
|
a.swap(b);
|
|
}
|
|
|
|
private:
|
|
template <class Container, typename Enabler>
|
|
friend struct absl::container_internal::hashtable_debug_internal::
|
|
HashtableDebugAccess;
|
|
|
|
struct FindElement {
|
|
template <class K, class... Args>
|
|
const_iterator operator()(const K& key, Args&&...) const {
|
|
return s.find(key);
|
|
}
|
|
const raw_hash_set& s;
|
|
};
|
|
|
|
struct HashElement {
|
|
template <class K, class... Args>
|
|
size_t operator()(const K& key, Args&&...) const {
|
|
return h(key);
|
|
}
|
|
const hasher& h;
|
|
};
|
|
|
|
template <class K1>
|
|
struct EqualElement {
|
|
template <class K2, class... Args>
|
|
bool operator()(const K2& lhs, Args&&...) const {
|
|
return eq(lhs, rhs);
|
|
}
|
|
const K1& rhs;
|
|
const key_equal& eq;
|
|
};
|
|
|
|
struct EmplaceDecomposable {
|
|
template <class K, class... Args>
|
|
std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
|
|
auto res = s.find_or_prepare_insert(key);
|
|
if (res.second) {
|
|
s.emplace_at(res.first, std::forward<Args>(args)...);
|
|
}
|
|
return {s.iterator_at(res.first), res.second};
|
|
}
|
|
raw_hash_set& s;
|
|
};
|
|
|
|
template <bool do_destroy>
|
|
struct InsertSlot {
|
|
template <class K, class... Args>
|
|
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
|
|
auto res = s.find_or_prepare_insert(key);
|
|
if (res.second) {
|
|
s.transfer(s.slot_array() + res.first, &slot);
|
|
} else if (do_destroy) {
|
|
s.destroy(&slot);
|
|
}
|
|
return {s.iterator_at(res.first), res.second};
|
|
}
|
|
raw_hash_set& s;
|
|
// Constructed slot. Either moved into place or destroyed.
|
|
slot_type&& slot;
|
|
};
|
|
|
|
// TODO(b/303305702): re-enable reentrant validation.
|
|
template <typename... Args>
|
|
inline void construct(slot_type* slot, Args&&... args) {
|
|
PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
|
|
}
|
|
inline void destroy(slot_type* slot) {
|
|
PolicyTraits::destroy(&alloc_ref(), slot);
|
|
}
|
|
inline void transfer(slot_type* to, slot_type* from) {
|
|
PolicyTraits::transfer(&alloc_ref(), to, from);
|
|
}
|
|
|
|
inline void destroy_slots() {
|
|
const size_t cap = capacity();
|
|
const ctrl_t* ctrl = control();
|
|
slot_type* slot = slot_array();
|
|
for (size_t i = 0; i != cap; ++i) {
|
|
if (IsFull(ctrl[i])) {
|
|
destroy(slot + i);
|
|
}
|
|
}
|
|
}
|
|
|
|
inline void dealloc() {
|
|
assert(capacity() != 0);
|
|
// Unpoison before returning the memory to the allocator.
|
|
SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
|
|
infoz().Unregister();
|
|
Deallocate<BackingArrayAlignment(alignof(slot_type))>(
|
|
&alloc_ref(), common().backing_array_start(),
|
|
common().alloc_size(sizeof(slot_type), alignof(slot_type)));
|
|
}
|
|
|
|
inline void destructor_impl() {
|
|
if (capacity() == 0) return;
|
|
destroy_slots();
|
|
dealloc();
|
|
}
|
|
|
|
// Erases, but does not destroy, the value pointed to by `it`.
|
|
//
|
|
// This merely updates the pertinent control byte. This can be used in
|
|
// conjunction with Policy::transfer to move the object to another place.
|
|
void erase_meta_only(const_iterator it) {
|
|
EraseMetaOnly(common(), it.control(), sizeof(slot_type));
|
|
}
|
|
|
|
// Allocates a backing array for `self` and initializes its control bytes.
|
|
// This reads `capacity` and updates all other fields based on the result of
|
|
// the allocation.
|
|
//
|
|
// This does not free the currently held array; `capacity` must be nonzero.
|
|
inline void initialize_slots() {
|
|
// People are often sloppy with the exact type of their allocator (sometimes
|
|
// it has an extra const or is missing the pair, but rebinds made it work
|
|
// anyway).
|
|
using CharAlloc =
|
|
typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
|
|
InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
|
|
common(), CharAlloc(alloc_ref()));
|
|
}
|
|
|
|
ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
|
|
assert(IsValidCapacity(new_capacity));
|
|
auto* old_ctrl = control();
|
|
auto* old_slots = slot_array();
|
|
const bool had_infoz = common().has_infoz();
|
|
const size_t old_capacity = common().capacity();
|
|
common().set_capacity(new_capacity);
|
|
initialize_slots();
|
|
|
|
auto* new_slots = slot_array();
|
|
size_t total_probe_length = 0;
|
|
for (size_t i = 0; i != old_capacity; ++i) {
|
|
if (IsFull(old_ctrl[i])) {
|
|
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
|
|
PolicyTraits::element(old_slots + i));
|
|
auto target = find_first_non_full(common(), hash);
|
|
size_t new_i = target.offset;
|
|
total_probe_length += target.probe_length;
|
|
SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
|
|
transfer(new_slots + new_i, old_slots + i);
|
|
}
|
|
}
|
|
if (old_capacity) {
|
|
SanitizerUnpoisonMemoryRegion(old_slots,
|
|
sizeof(slot_type) * old_capacity);
|
|
Deallocate<BackingArrayAlignment(alignof(slot_type))>(
|
|
&alloc_ref(), old_ctrl - ControlOffset(had_infoz),
|
|
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type),
|
|
had_infoz));
|
|
}
|
|
infoz().RecordRehash(total_probe_length);
|
|
}
|
|
|
|
// Prunes control bytes to remove as many tombstones as possible.
|
|
//
|
|
// See the comment on `rehash_and_grow_if_necessary()`.
|
|
inline void drop_deletes_without_resize() {
|
|
// Stack-allocate space for swapping elements.
|
|
alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
|
|
DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
|
|
}
|
|
|
|
// Called whenever the table *might* need to conditionally grow.
|
|
//
|
|
// This function is an optimization opportunity to perform a rehash even when
|
|
// growth is unnecessary, because vacating tombstones is beneficial for
|
|
// performance in the long-run.
|
|
void rehash_and_grow_if_necessary() {
|
|
const size_t cap = capacity();
|
|
if (cap > Group::kWidth &&
|
|
// Do these calculations in 64-bit to avoid overflow.
|
|
size() * uint64_t{32} <= cap * uint64_t{25}) {
|
|
// Squash DELETED without growing if there is enough capacity.
|
|
//
|
|
// Rehash in place if the current size is <= 25/32 of capacity.
|
|
// Rationale for such a high factor: 1) drop_deletes_without_resize() is
|
|
// faster than resize, and 2) it takes quite a bit of work to add
|
|
// tombstones. In the worst case, seems to take approximately 4
|
|
// insert/erase pairs to create a single tombstone and so if we are
|
|
// rehashing because of tombstones, we can afford to rehash-in-place as
|
|
// long as we are reclaiming at least 1/8 the capacity without doing more
|
|
// than 2X the work. (Where "work" is defined to be size() for rehashing
|
|
// or rehashing in place, and 1 for an insert or erase.) But rehashing in
|
|
// place is faster per operation than inserting or even doubling the size
|
|
// of the table, so we actually afford to reclaim even less space from a
|
|
// resize-in-place. The decision is to rehash in place if we can reclaim
|
|
// at about 1/8th of the usable capacity (specifically 3/28 of the
|
|
// capacity) which means that the total cost of rehashing will be a small
|
|
// fraction of the total work.
|
|
//
|
|
// Here is output of an experiment using the BM_CacheInSteadyState
|
|
// benchmark running the old case (where we rehash-in-place only if we can
|
|
// reclaim at least 7/16*capacity) vs. this code (which rehashes in place
|
|
// if we can recover 3/32*capacity).
|
|
//
|
|
      // Note that although the worst-case number of rehashes jumped up from
      // 15 to 190, the number of operations per second is almost the same.
|
|
//
|
|
// Abridged output of running BM_CacheInSteadyState benchmark from
|
|
// raw_hash_set_benchmark. N is the number of insert/erase operations.
|
|
//
|
|
// | OLD (recover >= 7/16 | NEW (recover >= 3/32)
|
|
// size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
|
|
// 448 | 145284 0.44 18 | 140118 0.44 19
|
|
// 493 | 152546 0.24 11 | 151417 0.48 28
|
|
// 538 | 151439 0.26 11 | 151152 0.53 38
|
|
// 583 | 151765 0.28 11 | 150572 0.57 50
|
|
// 628 | 150241 0.31 11 | 150853 0.61 66
|
|
// 672 | 149602 0.33 12 | 150110 0.66 90
|
|
// 717 | 149998 0.35 12 | 149531 0.70 129
|
|
// 762 | 149836 0.37 13 | 148559 0.74 190
|
|
// 807 | 149736 0.39 14 | 151107 0.39 14
|
|
// 852 | 150204 0.42 15 | 151019 0.42 15
|
|
drop_deletes_without_resize();
|
|
} else {
|
|
// Otherwise grow the container.
|
|
resize(NextCapacity(cap));
|
|
}
|
|
}
|
|
|
|
void maybe_increment_generation_or_rehash_on_move() {
|
|
common().maybe_increment_generation_on_move();
|
|
if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
|
|
resize(capacity());
|
|
}
|
|
}
|
|
|
|
template<bool propagate_alloc>
|
|
raw_hash_set& assign_impl(raw_hash_set&& that) {
|
|
// We don't bother checking for this/that aliasing. We just need to avoid
|
|
// breaking the invariants in that case.
|
|
destructor_impl();
|
|
common() = std::move(that.common());
|
|
// TODO(b/296061262): move instead of copying hash/eq/alloc.
|
|
hash_ref() = that.hash_ref();
|
|
eq_ref() = that.eq_ref();
|
|
CopyAlloc(alloc_ref(), that.alloc_ref(),
|
|
std::integral_constant<bool, propagate_alloc>());
|
|
that.common() = CommonFields{};
|
|
maybe_increment_generation_or_rehash_on_move();
|
|
return *this;
|
|
}
|
|
|
|
raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
|
|
const size_t size = that.size();
|
|
if (size == 0) return *this;
|
|
reserve(size);
|
|
for (iterator it = that.begin(); it != that.end(); ++it) {
|
|
insert(std::move(PolicyTraits::element(it.slot())));
|
|
that.destroy(it.slot());
|
|
}
|
|
that.dealloc();
|
|
that.common() = CommonFields{};
|
|
maybe_increment_generation_or_rehash_on_move();
|
|
return *this;
|
|
}
|
|
|
|
raw_hash_set& move_assign(raw_hash_set&& that,
|
|
std::true_type /*propagate_alloc*/) {
|
|
return assign_impl<true>(std::move(that));
|
|
}
|
|
raw_hash_set& move_assign(raw_hash_set&& that,
|
|
std::false_type /*propagate_alloc*/) {
|
|
if (alloc_ref() == that.alloc_ref()) {
|
|
return assign_impl<false>(std::move(that));
|
|
}
|
|
// Aliasing can't happen here because allocs would compare equal above.
|
|
assert(this != &that);
|
|
destructor_impl();
|
|
// We can't take over that's memory so we need to move each element.
|
|
// While moving elements, this should have that's hash/eq so copy hash/eq
|
|
// before moving elements.
|
|
// TODO(b/296061262): move instead of copying hash/eq.
|
|
hash_ref() = that.hash_ref();
|
|
eq_ref() = that.eq_ref();
|
|
return move_elements_allocs_unequal(std::move(that));
|
|
}
|
|
|
|
protected:
|
|
// Attempts to find `key` in the table; if it isn't found, returns a slot that
|
|
// the value can be inserted into, with the control byte already set to
|
|
// `key`'s H2.
|
|
template <class K>
|
|
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
|
|
prefetch_heap_block();
|
|
auto hash = hash_ref()(key);
|
|
auto seq = probe(common(), hash);
|
|
const ctrl_t* ctrl = control();
|
|
while (true) {
|
|
Group g{ctrl + seq.offset()};
|
|
for (uint32_t i : g.Match(H2(hash))) {
|
|
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
|
|
EqualElement<K>{key, eq_ref()},
|
|
PolicyTraits::element(slot_array() + seq.offset(i)))))
|
|
return {seq.offset(i), false};
|
|
}
|
|
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
|
|
seq.next();
|
|
assert(seq.index() <= capacity() && "full table!");
|
|
}
|
|
return {prepare_insert(hash), true};
|
|
}

  // Given the hash of a value not currently in the table, finds the next
  // viable slot index to insert it at.
  //
  // REQUIRES: At least one non-full slot available.
  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
    const bool rehash_for_bug_detection =
        common().should_rehash_for_bug_detection_on_insert();
    if (rehash_for_bug_detection) {
      // Move to a different heap allocation in order to detect bugs.
      const size_t cap = capacity();
      resize(growth_left() > 0 ? cap : NextCapacity(cap));
    }
    auto target = find_first_non_full(common(), hash);
    if (!rehash_for_bug_detection &&
        ABSL_PREDICT_FALSE(growth_left() == 0 &&
                           !IsDeleted(control()[target.offset]))) {
      rehash_and_grow_if_necessary();
      target = find_first_non_full(common(), hash);
    }
    common().increment_size();
    set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
    SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
    common().maybe_increment_generation_on_insert();
    infoz().RecordInsert(hash, target.probe_length);
    return target.offset;
  }
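
  // Illustrative note (not part of the original implementation): growth_left()
  // is only decremented above when the chosen slot was empty
  // (IsEmpty(control()[target.offset])). Reusing a kDeleted tombstone leaves
  // the growth budget unchanged, since that slot already counted against it
  // when its original occupant was inserted. For example, assuming
  // growth_left() == 1:
  //
  //   s.erase(k1);   // may leave a kDeleted tombstone in k1's slot
  //   s.insert(k2);  // if k2 reuses that tombstone, growth_left() stays 1;
  //                  // if it takes an empty slot, growth_left() drops to 0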

  // Constructs the value in the slot at index `i`. This only works after an
  // unsuccessful find_or_prepare_insert() and before any other modifications
  // happen in the raw_hash_set.
  //
  // PRECONDITION: `i` is an index returned from find_or_prepare_insert(k),
  // where k is the key decomposed from `forward<Args>(args)...`, and the bool
  // returned by find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(size_t i, Args&&... args) {
    construct(slot_array() + i, std::forward<Args>(args)...);

    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
               iterator_at(i) &&
           "constructed value does not match the lookup key");
  }
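
  // Illustrative note (not part of the original implementation): the public
  // insert/emplace paths combine find_or_prepare_insert() and emplace_at()
  // into a two-phase insert, roughly:
  //
  //   auto res = find_or_prepare_insert(key);
  //   if (res.second) {
  //     // Key was absent: slot res.first is reserved and its control byte
  //     // already holds key's H2; construct the element in place.
  //     emplace_at(res.first, std::forward<Args>(args)...);
  //   }
  //   return {iterator_at(res.first), res.second};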

  iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }
  const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return {control() + i, slot_array() + i, common().generation_ptr()};
  }

 private:
  friend struct RawHashSetTestOnlyAccess;

  // The number of slots we can still fill without needing to rehash.
  //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity, because we'd like to rehash when the table is
  // otherwise filled with tombstones: otherwise, probe sequences might get
  // unacceptably long without triggering a rehash. Callers can also force a
  // rehash via the standard `rehash(0)`, which will recompute this value as a
  // side-effect.
  //
  // See `CapacityToGrowth()`.
  size_t growth_left() const { return common().growth_left(); }
  void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
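
  // Illustrative note (not part of the original implementation): assuming the
  // usual SwissTable maximum load factor of 7/8, CapacityToGrowth() is
  // approximately
  //
  //   CapacityToGrowth(capacity) == capacity - capacity / 8
  //
  // so a freshly rehashed table of capacity 15 can take 14 inserts before it
  // must grow. The authoritative formula (including its small-capacity
  // special case) is CapacityToGrowth() itself.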

  // Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with the computation of a key's
  // hash.
  void prefetch_heap_block() const {
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
    __builtin_prefetch(control(), 0, 1);
#endif
  }

  CommonFields& common() { return settings_.template get<0>(); }
  const CommonFields& common() const { return settings_.template get<0>(); }

  ctrl_t* control() const { return common().control(); }
  slot_type* slot_array() const {
    return static_cast<slot_type*>(common().slot_array());
  }
  HashtablezInfoHandle infoz() { return common().infoz(); }

  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }

  // Make type-specific functions for this type's PolicyFunctions struct.
  static size_t hash_slot_fn(void* set, void* slot) {
    auto* h = static_cast<raw_hash_set*>(set);
    return PolicyTraits::apply(
        HashElement{h->hash_ref()},
        PolicyTraits::element(static_cast<slot_type*>(slot)));
  }
  static void transfer_slot_fn(void* set, void* dst, void* src) {
    auto* h = static_cast<raw_hash_set*>(set);
    h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
  }
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
  static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
    auto* set = reinterpret_cast<raw_hash_set*>(&common);

    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(common.slot_array(),
                                  sizeof(slot_type) * common.capacity());

    common.infoz().Unregister();
    Deallocate<BackingArrayAlignment(alignof(slot_type))>(
        &set->alloc_ref(), common.backing_array_start(),
        common.alloc_size(sizeof(slot_type), alignof(slot_type)));
  }

  static const PolicyFunctions& GetPolicyFunctions() {
    static constexpr PolicyFunctions value = {
        sizeof(slot_type),
        &raw_hash_set::hash_slot_fn,
        PolicyTraits::transfer_uses_memcpy()
            ? TransferRelocatable<sizeof(slot_type)>
            : &raw_hash_set::transfer_slot_fn,
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
             ? &DeallocateStandard<alignof(slot_type)>
             : &raw_hash_set::dealloc_fn),
    };
    return value;
  }
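
  // Illustrative note (not part of the original implementation):
  // GetPolicyFunctions() builds a small per-instantiation "vtable" so that
  // shared, type-erased helpers taking a CommonFields& and a PolicyFunctions&
  // can hash, transfer, and deallocate slots without being templated on the
  // policy. Two entries are routed to shared implementations when possible:
  // TransferRelocatable (a fixed-size memcpy) when the policy says slots are
  // trivially relocatable, and DeallocateStandard when the allocator is
  // std::allocator, so those instantiations share code.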

  // Bundle together CommonFields plus other objects which might be empty.
  // CompressedTuple will ensure that sizeof is not affected by any of the
  // empty fields that occur after CommonFields.
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                            allocator_type>
      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
};
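
// Illustrative note (not part of the original implementation): CompressedTuple
// relies on the empty-base optimization, so stateless functors add nothing to
// sizeof(raw_hash_set). A rough comparison, assuming empty hash, equality,
// and allocator types:
//
//   struct Uncompressed {
//     CommonFields common;
//     absl::Hash<int> hash;       // empty, but padded to at least one byte
//     std::equal_to<int> eq;      // empty
//     std::allocator<int> alloc;  // empty
//   };
//   // sizeof(Uncompressed) > sizeof(CommonFields), whereas the
//   // CompressedTuple used for settings_ stays at sizeof(CommonFields).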

// Erases all elements that satisfy the predicate `pred` from the container
// `c`. Returns the number of erased elements.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  const auto initial_size = c->size();
  for (auto it = c->begin(), last = c->end(); it != last;) {
    if (pred(*it)) {
      c->erase(it++);
    } else {
      ++it;
    }
  }
  return initial_size - c->size();
}
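
// Illustrative note (not part of the original implementation): this helper
// backs the `absl::erase_if` overloads of the SwissTable containers. Note the
// `c->erase(it++)` idiom above: the iterator is advanced before erase()
// invalidates the erased position. Rough usage through a public container,
// shown with absl::flat_hash_set purely as an example:
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4, 5};
//   auto n = absl::erase_if(s, [](int v) { return v % 2 == 0; });
//   // n == 2; s now holds {1, 3, 5} in some unspecified order.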

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.common(), hash);
    const ctrl_t* ctrl = set.control();
    while (true) {
      container_internal::Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slot_array() + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }
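
  // Illustrative note (not part of the original implementation): num_probes
  // counts one for each H2-matching candidate that compared unequal and one
  // for each extra group visited, so a result of 0 means the key (or an empty
  // slot proving its absence) was found immediately. Tests typically reach
  // this via the wrappers in hashtable_debug.h, e.g.
  // GetHashtableDebugNumProbes(set, key).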

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity();
    if (capacity == 0) return 0;
    size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));

    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (auto it = c.begin(); it != c.end(); ++it) {
        m += Traits::space_used(it.slot());
      }
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_