Commit 1449c9a1 by Evan Brown, committed by Copybara-Service

Implement small object optimization in swisstable - disabled for now.

Details:
- We use the space for control/slots pointers as the inline buffer.
- We use a max inline capacity of 1 to make the implementation much simpler and to avoid having to randomize the iteration order for inline tables.
- For iteration of inline tables, we introduce the kSooControl buffer, which has just 1 full control byte followed by 1 sentinel control byte so that incrementing yields an end() iterator. We don't access kSooControl during lookups, only during iteration.
PiperOrigin-RevId: 613253492
Change-Id: Id98ff11842f8bef27ac7ed88138dc03b46ce4fa6
parent 6bf3c73f
......@@ -181,15 +181,13 @@ TEST(FlatHashSet, EraseIf) {
}
}
class PoisonInline {
class PoisonSoo {
int64_t data_;
public:
explicit PoisonInline(int64_t d) : data_(d) {
SanitizerPoisonObject(&data_);
}
PoisonInline(const PoisonInline& that) : PoisonInline(*that) {}
~PoisonInline() { SanitizerUnpoisonObject(&data_); }
explicit PoisonSoo(int64_t d) : data_(d) { SanitizerPoisonObject(&data_); }
PoisonSoo(const PoisonSoo& that) : PoisonSoo(*that) {}
~PoisonSoo() { SanitizerUnpoisonObject(&data_); }
int64_t operator*() const {
SanitizerUnpoisonObject(&data_);
......@@ -198,45 +196,56 @@ class PoisonInline {
return ret;
}
template <typename H>
friend H AbslHashValue(H h, const PoisonInline& pi) {
friend H AbslHashValue(H h, const PoisonSoo& pi) {
return H::combine(std::move(h), *pi);
}
bool operator==(const PoisonInline& rhs) const { return **this == *rhs; }
bool operator==(const PoisonSoo& rhs) const { return **this == *rhs; }
};
// Tests that we don't touch the poisoned data_ member of PoisonInline.
TEST(FlatHashSet, PoisonInline) {
PoisonInline a(0), b(1);
{ // basic usage
flat_hash_set<PoisonInline> set;
set.insert(a);
EXPECT_THAT(set, UnorderedElementsAre(a));
set.insert(b);
EXPECT_THAT(set, UnorderedElementsAre(a, b));
set.erase(a);
EXPECT_THAT(set, UnorderedElementsAre(b));
set.rehash(0); // shrink to inline
EXPECT_THAT(set, UnorderedElementsAre(b));
}
{ // test move constructor from inline to inline
flat_hash_set<PoisonInline> set;
set.insert(a);
flat_hash_set<PoisonInline> set2(std::move(set));
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
{ // test move assignment from inline to inline
flat_hash_set<PoisonInline> set, set2;
set.insert(a);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
{ // test alloc move constructor from inline to inline
flat_hash_set<PoisonInline> set;
set.insert(a);
flat_hash_set<PoisonInline> set2(std::move(set),
std::allocator<PoisonInline>());
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooBasic) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set;
set.insert(a);
EXPECT_THAT(set, UnorderedElementsAre(a));
set.insert(b);
EXPECT_THAT(set, UnorderedElementsAre(a, b));
set.erase(a);
EXPECT_THAT(set, UnorderedElementsAre(b));
set.rehash(0); // Shrink to SOO.
EXPECT_THAT(set, UnorderedElementsAre(b));
}
TEST(FlatHashSet, PoisonSooMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set));
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooAllocMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set), std::allocator<PoisonSoo>());
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToEmptySoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToFullSoo) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2.insert(b);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, FlatHashSetPolicyDestroyReturnsTrue) {
......
......@@ -168,6 +168,9 @@ struct hash_policy_traits : common_policy_traits<Policy> {
#endif
}
// Whether small object optimization is enabled. False by default.
static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); }
private:
template <class Hash>
struct HashElement {
......@@ -183,6 +186,18 @@ struct hash_policy_traits : common_policy_traits<Policy> {
return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
Policy::element(static_cast<slot_type*>(slot)));
}
// Use go/ranked-overloads for dispatching. Rank1 is preferred.
struct Rank0 {};
struct Rank1 : Rank0 {};
// Use auto -> decltype as an enabler.
template <class P = Policy>
static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) {
return P::soo_enabled();
}
static constexpr bool soo_enabled_impl(Rank0) { return false; }
};
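// A minimal standalone sketch of the ranked-overload detection idiom used by
// soo_enabled() above. All names below are illustrative, not part of the
// Abseil sources: the Rank1 overload is preferred by overload resolution but
// SFINAEs away when P::soo_enabled() does not exist, so no separate trait is
// needed.
struct MyRank0 {};
struct MyRank1 : MyRank0 {};

struct PolicyWithSoo {
  static constexpr bool soo_enabled() { return true; }
};
struct PolicyWithoutSoo {};

template <class P>
constexpr auto MySooEnabledImpl(MyRank1) -> decltype(P::soo_enabled()) {
  return P::soo_enabled();
}
template <class P>
constexpr bool MySooEnabledImpl(MyRank0) {
  return false;
}

static_assert(MySooEnabledImpl<PolicyWithSoo>(MyRank1{}),
              "Rank1 overload wins when soo_enabled() exists");
static_assert(!MySooEnabledImpl<PolicyWithoutSoo>(MyRank1{}),
              "Rank0 fallback is used otherwise");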
} // namespace container_internal
......
......@@ -30,12 +30,14 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
// Represents a control byte corresponding to a full slot with arbitrary hash.
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
// We have space for `growth_left` before a single block of control bytes. A
// single block of empty control bytes is used for tables without any slots
// allocated. This enables removing a branch in the hot path of find(). In
// order to ensure that the control bytes are aligned to 16, we have 16 bytes
// before the control bytes even though growth_left only needs 8.
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
......@@ -46,6 +48,18 @@ alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
// We need one full byte followed by a sentinel byte for iterator::operator++ to
// work. We have a full group after kSentinel to be safe (in case operator++ is
// changed to read a full group).
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
ZeroCtrlT(), ctrl_t::kSentinel, ZeroCtrlT(), ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty};
static_assert(NumControlBytes(SooCapacity()) <= 17,
"kSooControl capacity too small");
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
......@@ -111,6 +125,20 @@ bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
CommonFields& common) {
assert(common.capacity() == NextCapacity(SooCapacity()));
// After resize from capacity 1 to 3, we always have exactly the slot with
// index 1 occupied, so we need to insert either at index 0 or index 2.
assert(HashSetResizeHelper::SooSlotIndex() == 1);
PrepareInsertCommon(common);
const size_t offset = H1(hash, common.control()) & 2;
common.set_growth_left(common.growth_left() - 1);
SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
return offset;
}
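// A quick standalone check of the `H1(...) & 2` trick above (illustrative
// code, not part of the change): after growing from SOO (capacity 1) to
// capacity 3, the old element always sits at index 1, so the new element must
// land at index 0 or 2. Masking a single hash bit yields exactly those two
// offsets.
inline void CheckSooInsertOffsets() {
  for (size_t h1 = 0; h1 < 8; ++h1) {
    const size_t offset = h1 & 2;
    assert(offset == 0 || offset == 2);  // never the occupied index 1
  }
}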
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == ctrl_t::kSentinel);
assert(IsValidCapacity(capacity));
......@@ -254,9 +282,10 @@ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
bool reuse, bool soo_enabled) {
c.set_size(0);
if (reuse) {
assert(!soo_enabled || c.capacity() > SooCapacity());
ResetCtrl(c, policy.slot_size);
ResetGrowthLeft(c);
c.infoz().RecordStorageChanged(0, c.capacity());
......@@ -264,12 +293,9 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
// We need to record infoz before calling dealloc, which will unregister
// infoz.
c.infoz().RecordClearedReservation();
c.infoz().RecordStorageChanged(0, 0);
c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
(*policy.dealloc)(c, policy);
c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
c.set_slots(nullptr);
c.set_capacity(0);
c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{};
}
}
......@@ -286,7 +312,7 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
// Copy second half of bytes to the beginning.
// We potentially copy more bytes in order to have compile time known size.
// Mirrored bytes from the old_ctrl_ will also be copied.
// Mirrored bytes from the old_ctrl() will also be copied.
// In case of old_capacity_ == 3, we will copy 1st element twice.
// Examples:
// old_ctrl = 0S0EEEEEEE...
......@@ -297,7 +323,7 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
//
// old_ctrl = 0123456S0123456EE...
// new_ctrl = 456S0123?????????...
std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth);
std::memcpy(new_ctrl, old_ctrl() + half_old_capacity + 1, kHalfWidth);
// Clean up copied kSentinel from old_ctrl.
new_ctrl[half_old_capacity] = ctrl_t::kEmpty;
......@@ -348,34 +374,55 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
size_t new_capacity) {
assert(is_single_group(new_capacity));
std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
NumControlBytes(new_capacity));
assert(HashSetResizeHelper::SooSlotIndex() == 1);
// This allows us to avoid branching on had_soo_slot_.
assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);
new_ctrl[1] = new_ctrl[new_capacity + 2] = h2;
new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
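// The control-byte layout produced above for new_capacity == 3, sketched with
// characters instead of ctrl_t values ('E' = kEmpty, 'S' = kSentinel, 'h' =
// the element's H2). Index 1 holds the transferred SOO element, matching
// SooSlotIndex() == 1, and index capacity + 2 is its mirrored clone. This is
// an illustration, not Abseil code; it assumes NumClonedBytes() == 15.
inline void CheckSooGrowthLayout() {
  const size_t cap = 3;            // new_capacity after growing from SOO
  char ctrl[19];                   // cap + 1 sentinel + 15 cloned bytes
  std::memset(ctrl, 'E', sizeof(ctrl));
  ctrl[1] = ctrl[cap + 2] = 'h';   // the full slot and its mirrored clone
  ctrl[cap] = 'S';                 // the sentinel
  assert(std::memcmp(ctrl, "EhESEhEEEEEEEEEEEEE", sizeof(ctrl)) == 0);
}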
void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
void* old_slots, void* new_slots, size_t slot_size) const {
void* new_slots, size_t slot_size) const {
assert(old_capacity_ > 0);
const size_t half_old_capacity = old_capacity_ / 2;
SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
std::memcpy(new_slots,
SlotAddress(old_slots, half_old_capacity + 1, slot_size),
SlotAddress(old_slots(), half_old_capacity + 1, slot_size),
slot_size * half_old_capacity);
std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size),
old_slots, slot_size * (half_old_capacity + 1));
old_slots(), slot_size * (half_old_capacity + 1));
}
void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
CommonFields& c, void* old_slots, size_t slot_size) {
CommonFields& c, size_t slot_size) {
assert(old_capacity_ < Group::kWidth / 2);
assert(is_single_group(c.capacity()));
assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(),
slot_size);
GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size);
// We poison since GrowIntoSingleGroupShuffleTransferableSlots
// may leave empty slots unpoisoned.
PoisonSingleGroupEmptySlots(c, slot_size);
}
void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
size_t slot_size) {
assert(was_soo_);
assert(had_soo_slot_);
assert(is_single_group(c.capacity()));
std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size),
old_soo_data(), slot_size);
PoisonSingleGroupEmptySlots(c, slot_size);
}
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
......@@ -100,6 +100,13 @@
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Small Object Optimization (SOO)
//
// When the size/alignment of the value_type and the capacity of the table are
// small, we enable small object optimization and store the values inline in
// the raw_hash_set object. This optimization allows us to avoid
// allocation/deallocation as well as cache/dTLB misses.
//
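// For illustration (hypothetical usage; SOO is disabled in this change): once
// enabled, a small table such as
//
//   absl::flat_hash_set<int> s;
//   s.insert(42);  // stored inline; no heap allocation
//
// keeps its single element in the bytes that otherwise hold the control and
// slot pointers.
//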
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
......@@ -531,10 +538,24 @@ ABSL_DLL extern const ctrl_t kEmptyGroup[32];
// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
// Const must be cast away here; no uses of this function will actually write
// to it, because it is only used for empty tables.
// to it because it is only used for empty tables.
return const_cast<ctrl_t*>(kEmptyGroup + 16);
}
// For use in SOO iterators.
// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
// bit in iterators. This would add branches but reduce cache misses.
ABSL_DLL extern const ctrl_t kSooControl[17];
// Returns a pointer to a full byte followed by a sentinel byte.
inline ctrl_t* SooControl() {
// Const must be cast away here; no uses of this function will actually write
// to it because it is only used for SOO iterators.
return const_cast<ctrl_t*>(kSooControl);
}
// Whether ctrl is from the SooControl array.
inline bool IsSooControl(const ctrl_t* ctrl) { return ctrl == SooControl(); }
// Returns a pointer to a generation to use for an empty hashtable.
GenerationType* EmptyGeneration();
......@@ -591,9 +612,7 @@ inline h2_t H2(size_t hash) { return hash & 0x7F; }
// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) {
return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
}
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
......@@ -1047,7 +1066,7 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
// Returns the number of control bytes including cloned.
inline size_t NumControlBytes(size_t capacity) {
constexpr size_t NumControlBytes(size_t capacity) {
return capacity + 1 + NumClonedBytes();
}
......@@ -1098,12 +1117,64 @@ class RawHashSetLayout {
size_t slot_offset_;
};
// We only allow a maximum of 1 SOO element, which makes the implementation
// much simpler. Complications with multiple SOO elements include:
// - Satisfying the guarantee that erasing one element doesn't invalidate
// iterators to other elements means we would probably need actual SOO
// control bytes.
// - In order to prevent user code from depending on iteration order for small
// tables, we would need to randomize the iteration order somehow.
constexpr size_t SooCapacity() { return 1; }
// Sentinel type to indicate SOO CommonFields construction.
struct soo_tag_t {};
// Sentinel type to indicate SOO CommonFields construction with full size.
struct full_soo_tag_t {};
// This allows us to work around an uninitialized memory warning when
// constructing begin() iterators in empty hashtables.
union MaybeInitializedPtr {
void* p;
};
struct HeapPtrs {
HeapPtrs() = default;
explicit HeapPtrs(ctrl_t* c) : control(c) {}
// The control bytes (and, also, a pointer near to the base of the backing
// array).
//
// This contains `capacity + 1 + NumClonedBytes()` entries, even
// when the table is empty (hence EmptyGroup).
//
// Note that growth_left is stored immediately before this pointer.
// May be uninitialized for SOO tables.
ctrl_t* control;
// The beginning of the slots, located at `SlotOffset()` bytes after
// `control`. May be uninitialized for empty tables.
// Note: we can't use `slots` because Qt defines "slots" as a macro.
MaybeInitializedPtr slot_array;
};
// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
union HeapOrSoo {
HeapOrSoo() = default;
explicit HeapOrSoo(ctrl_t* c) : heap(c) {}
HeapPtrs heap;
unsigned char soo_data[sizeof(HeapPtrs)];
};
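// A size-check sketch (illustrative model types, not the Abseil definitions):
// the inline buffer costs nothing because it exactly overlays the two heap
// pointers, giving 16 bytes of inline slot storage on typical 64-bit targets.
struct HeapPtrsModel {
  void* control;
  void* slot_array;
};
union HeapOrSooModel {
  HeapPtrsModel heap;
  unsigned char soo_data[sizeof(HeapPtrsModel)];
};
static_assert(sizeof(HeapOrSooModel) == sizeof(HeapPtrsModel),
              "SOO adds no per-table size overhead");
static_assert(alignof(HeapOrSooModel) == alignof(HeapPtrsModel),
              "and no alignment overhead");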
// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
class CommonFields : public CommonFieldsGenerationInfo {
public:
CommonFields() = default;
CommonFields() : capacity_(0), size_(0), heap_or_soo_(EmptyGroup()) {}
explicit CommonFields(soo_tag_t) : capacity_(SooCapacity()), size_(0) {}
explicit CommonFields(full_soo_tag_t)
: capacity_(SooCapacity()), size_(size_t{1} << HasInfozShift()) {}
// Not copyable
CommonFields(const CommonFields&) = delete;
......@@ -1113,8 +1184,20 @@ class CommonFields : public CommonFieldsGenerationInfo {
CommonFields(CommonFields&& that) = default;
CommonFields& operator=(CommonFields&&) = default;
ctrl_t* control() const { return control_; }
void set_control(ctrl_t* c) { control_ = c; }
template <bool kSooEnabled>
static CommonFields CreateDefault() {
return kSooEnabled ? CommonFields{soo_tag_t{}} : CommonFields{};
}
// The inline data for SOO is written on top of control_/slots_.
const void* soo_data() const { return heap_or_soo_.soo_data; }
void* soo_data() { return heap_or_soo_.soo_data; }
HeapOrSoo heap_or_soo() const { return heap_or_soo_; }
const HeapOrSoo& heap_or_soo_ref() const { return heap_or_soo_; }
ctrl_t* control() const { return heap_or_soo_.heap.control; }
void set_control(ctrl_t* c) { heap_or_soo_.heap.control = c; }
void* backing_array_start() const {
// growth_left (and maybe infoz) is stored before control bytes.
assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
......@@ -1122,14 +1205,33 @@ class CommonFields : public CommonFieldsGenerationInfo {
}
// Note: we can't use slots() because Qt defines "slots" as a macro.
void* slot_array() const { return slots_; }
void set_slots(void* s) { slots_ = s; }
void* slot_array() const { return heap_or_soo_.heap.slot_array.p; }
MaybeInitializedPtr slots_union() const {
// Suppress erroneous uninitialized memory errors on GCC.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return heap_or_soo_.heap.slot_array;
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
void set_slots(void* s) { heap_or_soo_.heap.slot_array.p = s; }
// The number of filled slots.
size_t size() const { return size_ >> HasInfozShift(); }
void set_size(size_t s) {
size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
}
void set_empty_soo() {
AssertInSooMode();
size_ = 0;
}
void set_full_soo() {
AssertInSooMode();
size_ = size_t{1} << HasInfozShift();
}
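  // A sanity sketch of the size_ encoding the two functions above rely on,
  // assuming HasInfozShift() == 1: the low bit stores has_infoz and the real
  // size lives in the bits above it, so set_full_soo() encodes size() == 1
  // with the infoz bit clear.
  static_assert((size_t{1} << 1) >> 1 == 1, "size() decodes to 1");
  static_assert(((size_t{1} << 1) & 1) == 0, "has_infoz() stays false");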
void increment_size() {
assert(size() < capacity());
size_ += size_t{1} << HasInfozShift();
......@@ -1148,6 +1250,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The number of slots we can still fill without needing to rehash.
// This is stored in the heap allocation before the control bytes.
// TODO(b/289225379): experiment with moving growth_left back inline to
// increase room for SOO.
size_t growth_left() const {
const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
......@@ -1184,10 +1288,6 @@ class CommonFields : public CommonFieldsGenerationInfo {
return CommonFieldsGenerationInfo::
should_rehash_for_bug_detection_on_move(control(), capacity());
}
void maybe_increment_generation_on_move() {
if (capacity() == 0) return;
increment_generation();
}
void reset_reserved_growth(size_t reservation) {
CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
}
......@@ -1198,6 +1298,14 @@ class CommonFields : public CommonFieldsGenerationInfo {
.alloc_size(slot_size);
}
// Move fields other than heap_or_soo_.
void move_non_heap_or_soo_fields(CommonFields& that) {
static_cast<CommonFieldsGenerationInfo&>(*this) =
std::move(static_cast<CommonFieldsGenerationInfo&>(that));
capacity_ = that.capacity_;
size_ = that.size_;
}
// Returns the number of control bytes set to kDeleted. For testing only.
size_t TombstonesCount() const {
return static_cast<size_t>(
......@@ -1211,21 +1319,12 @@ class CommonFields : public CommonFieldsGenerationInfo {
return (size_t{1} << HasInfozShift()) - 1;
}
// TODO(b/182800944): Investigate removing some of these fields:
// - control/slots can be derived from each other
// The control bytes (and, also, a pointer near to the base of the backing
// array).
//
// This contains `capacity + 1 + NumClonedBytes()` entries, even
// when the table is empty (hence EmptyGroup).
//
// Note that growth_left is stored immediately before this pointer.
ctrl_t* control_ = EmptyGroup();
// The beginning of the slots, located at `SlotOffset()` bytes after
// `control`. May be null for empty tables.
void* slots_ = nullptr;
// We can't assert that SOO is enabled because we don't have SooEnabled(), but
// we assert what we can.
void AssertInSooMode() const {
assert(capacity() == SooCapacity());
assert(!has_infoz());
}
// The number of slots in the backing array. This is always 2^N-1 for an
// integer N. NOTE: we tried experimenting with compressing the capacity and
......@@ -1233,10 +1332,16 @@ class CommonFields : public CommonFieldsGenerationInfo {
// power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
// size_ and storing size in the low bits. Both of these experiments were
// regressions, presumably because we need capacity to do find operations.
size_t capacity_ = 0;
size_t capacity_;
// The size and also has one bit that stores whether we have infoz.
size_t size_ = 0;
// TODO(b/289225379): we could put size_ into HeapOrSoo and make capacity_
// encode the size in SOO case. We would be making size()/capacity() more
// expensive in order to have more SOO space.
size_t size_;
// Either the control/slots pointers or the SOO slot.
HeapOrSoo heap_or_soo_;
};
template <class Policy, class Hash, class Eq, class Alloc>
......@@ -1399,6 +1504,10 @@ inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
const void* const& slot_b) {
// If either control byte is null, then we can't tell.
if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
const bool a_is_soo = IsSooControl(ctrl_a);
if (a_is_soo != IsSooControl(ctrl_b)) return false;
if (a_is_soo) return slot_a == slot_b;
const void* low_slot = slot_a;
const void* hi_slot = slot_b;
if (ctrl_a > ctrl_b) {
......@@ -1422,41 +1531,45 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
// - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
// - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
// the chances that the hot paths will be inlined.
// fail_if(is_invalid, message) crashes when is_invalid is true and provides
// an error message based on `message`.
const auto fail_if = [](bool is_invalid, const char* message) {
if (ABSL_PREDICT_FALSE(is_invalid)) {
ABSL_RAW_LOG(FATAL, "Invalid iterator comparison. %s", message);
}
};
const bool a_is_default = ctrl_a == EmptyGroup();
const bool b_is_default = ctrl_b == EmptyGroup();
if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
ABSL_RAW_LOG(
FATAL,
"Invalid iterator comparison. Comparing default-constructed iterator "
"with non-default-constructed iterator.");
}
if (a_is_default && b_is_default) return;
fail_if(a_is_default != b_is_default,
"Comparing default-constructed hashtable iterator with a "
"non-default-constructed hashtable iterator.");
if (SwisstableGenerationsEnabled()) {
if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
// Users don't need to know whether the tables are SOO so don't mention SOO
// in the debug message.
const bool a_is_soo = IsSooControl(ctrl_a);
const bool b_is_soo = IsSooControl(ctrl_b);
fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo),
"Comparing iterators from different hashtables.");
const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
if (a_is_empty != b_is_empty) {
ABSL_RAW_LOG(FATAL,
"Invalid iterator comparison. Comparing iterator from a "
"non-empty hashtable with an iterator from an empty "
"hashtable.");
}
if (a_is_empty && b_is_empty) {
ABSL_RAW_LOG(FATAL,
"Invalid iterator comparison. Comparing iterators from "
"different empty hashtables.");
}
fail_if(a_is_empty != b_is_empty,
"Comparing an iterator from an empty hashtable with an iterator "
"from a non-empty hashtable.");
fail_if(a_is_empty && b_is_empty,
"Comparing iterators from different empty hashtables.");
const bool a_is_end = ctrl_a == nullptr;
const bool b_is_end = ctrl_b == nullptr;
if (a_is_end || b_is_end) {
ABSL_RAW_LOG(FATAL,
"Invalid iterator comparison. Comparing iterator with an "
"end() iterator from a different hashtable.");
}
ABSL_RAW_LOG(FATAL,
"Invalid iterator comparison. Comparing non-end() iterators "
"from different hashtables.");
fail_if(a_is_end || b_is_end,
"Comparing iterator with an end() iterator from a different "
"hashtable.");
fail_if(true, "Comparing non-end() iterators from different hashtables.");
} else {
ABSL_HARDENING_ASSERT(
AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
......@@ -1548,31 +1661,49 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
}
// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
size_t slot_size) {
const size_t capacity = common.capacity();
assert(i < capacity);
auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
// Sets sanitizer poisoning for slot corresponding to control byte being set.
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
size_t slot_size) {
assert(i < c.capacity());
auto* slot_i = static_cast<const char*>(c.slot_array()) + i * slot_size;
if (IsFull(h)) {
SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
} else {
SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
}
ctrl_t* ctrl = common.control();
// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
size_t slot_size) {
DoSanitizeOnSetCtrl(c, i, h, slot_size);
ctrl_t* ctrl = c.control();
ctrl[i] = h;
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
ctrl[((i - NumClonedBytes()) & c.capacity()) +
(NumClonedBytes() & c.capacity())] = h;
}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {
SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size);
}
// Like SetCtrl, but in a single group table, we can save some operations when
// setting the cloned control byte.
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
size_t slot_size) {
assert(is_single_group(c.capacity()));
DoSanitizeOnSetCtrl(c, i, h, slot_size);
ctrl_t* ctrl = c.control();
ctrl[i] = h;
ctrl[i + c.capacity() + 1] = h;
}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
size_t slot_size) {
SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
size_t slot_size) {
SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size);
}
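// A quick check (illustrative code, not part of the change) that for a
// single-group table the general mirroring formula in SetCtrl collapses to
// the `i + capacity + 1` shortcut above. Assumes NumClonedBytes() == 15,
// i.e. Group::kWidth == 16.
inline void CheckSingleGroupMirror() {
  const size_t kCloned = 15, capacity = 7;  // a single-group capacity (2^3-1)
  for (size_t i = 0; i < capacity; ++i) {
    const size_t general = ((i - kCloned) & capacity) + (kCloned & capacity);
    assert(general == i + capacity + 1);  // unsigned wraparound is well-defined
  }
}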
// growth_left (which is a size_t) is stored with the backing array.
......@@ -1633,10 +1764,11 @@ ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
// See GrowIntoSingleGroupShuffleControlBytes for details.
class HashSetResizeHelper {
public:
explicit HashSetResizeHelper(CommonFields& c)
: old_ctrl_(c.control()),
old_capacity_(c.capacity()),
had_infoz_(c.has_infoz()) {}
explicit HashSetResizeHelper(CommonFields& c, bool was_soo, bool had_soo_slot)
: old_capacity_(c.capacity()),
had_infoz_(c.has_infoz()),
was_soo_(was_soo),
had_soo_slot_(had_soo_slot) {}
// Optimized for small groups version of `find_first_non_full`.
// Beneficial only right after calling `raw_hash_set::resize`.
......@@ -1667,9 +1799,25 @@ class HashSetResizeHelper {
return FindInfo{offset, 0};
}
ctrl_t* old_ctrl() const { return old_ctrl_; }
HeapOrSoo& old_heap_or_soo() { return old_heap_or_soo_; }
void* old_soo_data() { return old_heap_or_soo_.soo_data; }
ctrl_t* old_ctrl() const {
assert(!was_soo_);
return old_heap_or_soo_.heap.control;
}
void* old_slots() const {
assert(!was_soo_);
return old_heap_or_soo_.heap.slot_array.p;
}
size_t old_capacity() const { return old_capacity_; }
// Returns the index of the SOO slot when growing from SOO to non-SOO in a
// single group. See also InitControlBytesAfterSoo(). It's important to use
// index 1 so that when resizing from capacity 1 to 3, we can still have
// random iteration order between the first two inserted elements.
// I.e. it allows inserting the second element at either index 0 or 2.
static size_t SooSlotIndex() { return 1; }
// Allocates a backing array for the hashtable.
// Reads `capacity` and updates all other fields based on the result of
// the allocation.
......@@ -1705,8 +1853,8 @@ class HashSetResizeHelper {
// Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
size_t AlignOfSlot>
ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
Alloc alloc) {
ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, Alloc alloc,
ctrl_t soo_slot_h2) {
assert(c.capacity());
// Folks with custom allocators often make unwarranted assumptions about the
// behavior of their classes vis-a-vis trivial destructability and what
......@@ -1715,9 +1863,13 @@ class HashSetResizeHelper {
// a workaround while we plan the exact guarantee we want to provide.
const size_t sample_size =
(std::is_same<Alloc, std::allocator<char>>::value &&
c.slot_array() == nullptr)
(was_soo_ || old_capacity_ == 0))
? SizeOfSlot
: 0;
// TODO(b/289225379): when SOO is enabled, we should still sample on first
// insertion and if Sample is non-null, then we should force a heap
// allocation. Note that we'll also have to force capacity of 3 so that
// is_soo() still works.
HashtablezInfoHandle infoz =
sample_size > 0 ? Sample(sample_size) : c.infoz();
......@@ -1735,10 +1887,15 @@ class HashSetResizeHelper {
const bool grow_single_group =
IsGrowingIntoSingleGroupApplicable(old_capacity_, layout.capacity());
if (old_capacity_ != 0 && grow_single_group) {
if (was_soo_ && grow_single_group) {
InitControlBytesAfterSoo(c.control(), soo_slot_h2, layout.capacity());
if (TransferUsesMemcpy && had_soo_slot_) {
TransferSlotAfterSoo(c, SizeOfSlot);
}
} else if (old_capacity_ != 0 && grow_single_group) {
if (TransferUsesMemcpy) {
GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
GrowSizeIntoSingleGroupTransferable(c, SizeOfSlot);
DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot);
} else {
GrowIntoSingleGroupShuffleControlBytes(c.control(), layout.capacity());
}
......@@ -1749,7 +1906,7 @@ class HashSetResizeHelper {
c.set_has_infoz(has_infoz);
if (has_infoz) {
infoz.RecordStorageChanged(c.size(), layout.capacity());
if (grow_single_group || old_capacity_ == 0) {
if (was_soo_ || grow_single_group || old_capacity_ == 0) {
infoz.RecordRehash(0);
}
c.set_infoz(infoz);
......@@ -1763,21 +1920,22 @@ class HashSetResizeHelper {
// PRECONDITIONS:
// 1. GrowIntoSingleGroupShuffleControlBytes was already called.
template <class PolicyTraits, class Alloc>
void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
typename PolicyTraits::slot_type* old_slots) {
void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref) {
assert(old_capacity_ < Group::kWidth / 2);
assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
using slot_type = typename PolicyTraits::slot_type;
assert(is_single_group(c.capacity()));
auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
auto* old_slots_ptr = reinterpret_cast<slot_type*>(old_slots());
size_t shuffle_bit = old_capacity_ / 2 + 1;
for (size_t i = 0; i < old_capacity_; ++i) {
if (IsFull(old_ctrl_[i])) {
if (IsFull(old_ctrl()[i])) {
size_t new_i = i ^ shuffle_bit;
SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
PolicyTraits::transfer(&alloc_ref, new_slots + new_i,
old_slots_ptr + i);
}
}
PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
......@@ -1785,11 +1943,11 @@ class HashSetResizeHelper {
// Deallocates old backing array.
template <size_t AlignOfSlot, class CharAlloc>
void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
void DeallocateOld(CharAlloc alloc_ref, size_t slot_size) {
SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
auto layout = RawHashSetLayout(old_capacity_, AlignOfSlot, had_infoz_);
Deallocate<BackingArrayAlignment(AlignOfSlot)>(
&alloc_ref, old_ctrl_ - layout.control_offset(),
&alloc_ref, old_ctrl() - layout.control_offset(),
layout.alloc_size(slot_size));
}
......@@ -1805,8 +1963,12 @@ class HashSetResizeHelper {
// Relocates control bytes and slots into new single group for
// transferable objects.
// Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
size_t slot_size);
void GrowSizeIntoSingleGroupTransferable(CommonFields& c, size_t slot_size);
// If there was an SOO slot and slots are transferable, transfers the SOO slot
// into the new heap allocation. Must be called only if
// IsGrowingIntoSingleGroupApplicable returned true.
void TransferSlotAfterSoo(CommonFields& c, size_t slot_size);
// Shuffle control bits deterministically to the next capacity.
// Returns offset for newly added element with given hash.
......@@ -1839,6 +2001,13 @@ class HashSetResizeHelper {
void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
size_t new_capacity) const;
// If the table was SOO, initializes new control bytes. `h2` is the control
// byte corresponding to the full slot. Must be called only if
// IsGrowingIntoSingleGroupApplicable returned true.
// Requires: `had_soo_slot_ || h2 == ctrl_t::kEmpty`.
void InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
size_t new_capacity);
// Shuffle trivially transferable slots in the way consistent with
// GrowIntoSingleGroupShuffleControlBytes.
//
......@@ -1852,8 +2021,7 @@ class HashSetResizeHelper {
// 1. new_slots are transferred from old_slots_ consistent with
// GrowIntoSingleGroupShuffleControlBytes.
// 2. Empty new_slots are *not* poisoned.
void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
void* new_slots,
void GrowIntoSingleGroupShuffleTransferableSlots(void* new_slots,
size_t slot_size) const;
// Poison empty slots that were transferred using the deterministic algorithm
......@@ -1873,11 +2041,22 @@ class HashSetResizeHelper {
}
}
ctrl_t* old_ctrl_;
HeapOrSoo old_heap_or_soo_;
size_t old_capacity_;
bool had_infoz_;
bool was_soo_;
bool had_soo_slot_;
};
inline void PrepareInsertCommon(CommonFields& common) {
common.increment_size();
common.maybe_increment_generation_on_insert();
}
// Like prepare_insert, but for the case of inserting into a full SOO table.
size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
CommonFields& common);
// PolicyFunctions bundles together some information for a particular
// raw_hash_set<T, ...> instantiation. This information is passed to
// type-erased functions that want to do small amounts of type-specific
......@@ -1899,7 +2078,7 @@ struct PolicyFunctions {
// or creating a new one based on the value of "reuse".
// REQUIRES: c.capacity > 0
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse);
bool reuse, bool soo_enabled);
// Type-erased version of raw_hash_set::erase_meta_only.
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
......@@ -1988,6 +2167,31 @@ class raw_hash_set {
using key_arg = typename KeyArgImpl::template type<K, key_type>;
private:
// TODO(b/289225379): we could add extra SOO space inside raw_hash_set
// after CommonFields to allow inlining larger slot_types (e.g. std::string),
// but it's a bit complicated if we want to support incomplete mapped_type in
// flat_hash_map. We could potentially do this for flat_hash_set and for an
// allowlist of `mapped_type`s of flat_hash_map that includes e.g. arithmetic
// types, strings, cords, and pairs/tuples of allowlisted types.
constexpr static bool SooEnabled() {
return PolicyTraits::soo_enabled() &&
sizeof(slot_type) <= sizeof(HeapOrSoo) &&
alignof(slot_type) <= alignof(HeapOrSoo);
}
// TODO(b/289225379): this is used for pretty printing in GDB/LLDB, but if we
// use this instead of SooEnabled(), then we get compile errors in some OSS
// compilers due to incomplete mapped_type in flat_hash_map. We need to
// resolve this before launching SOO.
// constexpr static bool kSooEnabled = SooEnabled();
// Whether `size` fits in the SOO capacity of this table.
bool fits_in_soo(size_t size) const {
return SooEnabled() && size <= SooCapacity();
}
// Whether this table is in SOO mode or non-SOO mode.
bool is_soo() const { return fits_in_soo(capacity()); }
bool is_full_soo() const { return is_soo() && !empty(); }
// Give an early error when key_type is not hashable/eq.
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
......@@ -2102,6 +2306,18 @@ class raw_hash_set {
// not equal to any end iterator.
ABSL_ASSUME(ctrl != nullptr);
}
// This constructor is used in begin() to avoid an MSan
// use-of-uninitialized-value error. Delegating from this constructor to
// the previous one doesn't avoid the error.
iterator(ctrl_t* ctrl, MaybeInitializedPtr slot,
const GenerationType* generation_ptr)
: HashSetIteratorGenerationInfo(generation_ptr),
ctrl_(ctrl),
slot_(to_slot(slot.p)) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
ABSL_ASSUME(ctrl != nullptr);
}
// For end() iterators.
explicit iterator(const GenerationType* generation_ptr)
: HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
......@@ -2202,8 +2418,9 @@ class raw_hash_set {
size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
: settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
alloc) {
if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) {
resize(NormalizeCapacity(bucket_count));
}
}
......@@ -2310,6 +2527,15 @@ class raw_hash_set {
if (size == 0) {
return;
}
// We don't use `that.is_soo()` here because `that` can have non-SOO
// capacity but have a size that fits into SOO capacity.
if (fits_in_soo(size)) {
assert(size == 1);
common().set_full_soo();
emplace_at(soo_iterator(), *that.begin());
return;
}
assert(!that.is_soo());
const size_t cap = capacity();
// Note about single group tables:
// 1. It is correct to have any order of elements.
......@@ -2367,16 +2593,22 @@ class raw_hash_set {
// would create a nullptr functor that cannot be called.
// TODO(b/296061262): move instead of copying hash/eq/alloc.
// Note: we avoid using exchange for better generated code.
settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
that.alloc_ref()) {
that.common() = CommonFields{};
settings_(PolicyTraits::transfer_uses_memcpy() || !that.is_full_soo()
? std::move(that.common())
: CommonFields{full_soo_tag_t{}},
that.hash_ref(), that.eq_ref(), that.alloc_ref()) {
if (!PolicyTraits::transfer_uses_memcpy() && that.is_full_soo()) {
transfer(soo_slot(), that.soo_slot());
}
that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
: settings_(CommonFields::CreateDefault<SooEnabled()>(), that.hash_ref(),
that.eq_ref(), a) {
if (a == that.alloc_ref()) {
std::swap(common(), that.common());
swap_common(that);
maybe_increment_generation_or_rehash_on_move();
} else {
move_elements_allocs_unequal(std::move(that));
......@@ -2411,9 +2643,10 @@ class raw_hash_set {
~raw_hash_set() { destructor_impl(); }
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
// TODO(b/324478958): Consider reverting if no impact.
if (ABSL_PREDICT_FALSE(empty())) return end();
auto it = iterator_at(0);
if (is_soo()) return soo_iterator();
iterator it = {control(), common().slots_union(),
common().generation_ptr()};
it.skip_empty_or_deleted();
assert(IsFull(*it.control()));
return it;
......@@ -2435,7 +2668,15 @@ class raw_hash_set {
bool empty() const { return !size(); }
size_t size() const { return common().size(); }
size_t capacity() const { return common().capacity(); }
size_t capacity() const {
const size_t cap = common().capacity();
// Use local variables because compiler complains about using functions in
// assume.
static constexpr bool kSooEnabled = SooEnabled();
static constexpr size_t kSooCapacity = SooCapacity();
ABSL_ASSUME(!kSooEnabled || cap >= kSooCapacity);
return cap;
}
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
......@@ -2449,9 +2690,13 @@ class raw_hash_set {
const size_t cap = capacity();
if (cap == 0) {
// Already guaranteed to be empty; so nothing to do.
} else if (is_soo()) {
if (!empty()) destroy(soo_slot());
common().set_empty_soo();
} else {
destroy_slots();
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128,
SooEnabled());
}
common().set_reserved_growth(0);
common().set_reservation_size(0);
......@@ -2582,7 +2827,7 @@ class raw_hash_set {
std::pair<iterator, bool> emplace(Args&&... args)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
slot_type* slot = to_slot(&raw);
construct(slot, std::forward<Args>(args)...);
const auto& elem = PolicyTraits::element(slot);
......@@ -2690,7 +2935,11 @@ class raw_hash_set {
void erase(iterator it) {
AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
destroy(it.slot());
erase_meta_only(it);
if (is_soo()) {
common().set_empty_soo();
} else {
erase_meta_only(it);
}
}
iterator erase(const_iterator first,
......@@ -2698,12 +2947,19 @@ class raw_hash_set {
// We check for empty first because ClearBackingArray requires that
// capacity() > 0 as a precondition.
if (empty()) return end();
if (first == last) return last.inner_;
if (is_soo()) {
destroy(soo_slot());
common().set_empty_soo();
return end();
}
if (first == begin() && last == end()) {
// TODO(ezb): we access control bytes in destroy_slots so it could make
// sense to combine destroy_slots and ClearBackingArray to avoid cache
// misses when the table is large. Note that we also do this in clear().
destroy_slots();
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true,
SooEnabled());
common().set_reserved_growth(common().reservation_size());
return end();
}
......@@ -2718,13 +2974,21 @@ class raw_hash_set {
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
assert(this != &src);
// Returns whether insertion took place.
const auto insert_slot = [this](slot_type* src_slot) {
return PolicyTraits::apply(InsertSlot<false>{*this, std::move(*src_slot)},
PolicyTraits::element(src_slot))
.second;
};
if (src.is_soo()) {
if (src.empty()) return;
if (insert_slot(src.soo_slot())) src.common().set_empty_soo();
return;
}
for (auto it = src.begin(), e = src.end(); it != e;) {
auto next = std::next(it);
if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
PolicyTraits::element(it.slot()))
.second) {
src.erase_meta_only(it);
}
if (insert_slot(it.slot())) src.erase_meta_only(it);
it = next;
}
}
......@@ -2738,7 +3002,11 @@ class raw_hash_set {
AssertIsFull(position.control(), position.inner_.generation(),
position.inner_.generation_ptr(), "extract()");
auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
erase_meta_only(position);
if (is_soo()) {
common().set_empty_soo();
} else {
erase_meta_only(position);
}
return node;
}
......@@ -2755,7 +3023,7 @@ class raw_hash_set {
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(common(), that.common());
swap_common(that);
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
SwapAlloc(alloc_ref(), that.alloc_ref(),
......@@ -2763,17 +3031,31 @@ class raw_hash_set {
}
void rehash(size_t n) {
if (n == 0 && capacity() == 0) return;
if (n == 0 && size() == 0) {
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
return;
const size_t cap = capacity();
if (n == 0) {
if (cap == 0 || is_soo()) return;
if (empty()) {
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
SooEnabled());
return;
}
if (SooEnabled() && size() <= SooCapacity()) {
alignas(slot_type) unsigned char slot_space[sizeof(slot_type)];
slot_type* tmp_slot = to_slot(slot_space);
transfer(tmp_slot, begin().slot());
ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false,
SooEnabled());
transfer(soo_slot(), tmp_slot);
common().set_full_soo();
return;
}
}
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity()) {
if (n == 0 || m > cap) {
resize(m);
// This is after resize, to ensure that we have completed the allocation
......@@ -2783,7 +3065,9 @@ class raw_hash_set {
}
void reserve(size_t n) {
if (n > size() + growth_left()) {
const size_t max_size_before_growth =
is_soo() ? SooCapacity() : size() + growth_left();
if (n > max_size_before_growth) {
size_t m = GrowthToLowerboundCapacity(n);
resize(NormalizeCapacity(m));
......@@ -2816,6 +3100,7 @@ class raw_hash_set {
// specific benchmarks indicating its importance.
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
if (SooEnabled() ? is_soo() : capacity() == 0) return;
(void)key;
// Avoid probing if we won't be able to prefetch the addresses received.
#ifdef ABSL_HAVE_PREFETCH
......@@ -2836,26 +3121,14 @@ class raw_hash_set {
template <class K = key_type>
iterator find(const key_arg<K>& key,
size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto seq = probe(common(), hash);
slot_type* slot_ptr = slot_array();
const ctrl_t* ctrl = control();
while (true) {
Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slot_ptr + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
assert(seq.index() <= capacity() && "full table!");
}
if (is_soo()) return find_soo(key);
return find_non_soo(key, hash);
}
template <class K = key_type>
iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (is_soo()) return find_soo(key);
prefetch_heap_block();
return find(key, hash_ref()(key));
return find_non_soo(key, hash_ref()(key));
}
template <class K = key_type>
......@@ -3007,7 +3280,39 @@ class raw_hash_set {
PolicyTraits::transfer(&alloc_ref(), to, from);
}
// TODO(b/289225379): consider having a helper class that has the impls for
// SOO functionality.
template <class K = key_type>
iterator find_soo(const key_arg<K>& key) {
assert(is_soo());
return empty() || !PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
PolicyTraits::element(soo_slot()))
? end()
: soo_iterator();
}
template <class K = key_type>
iterator find_non_soo(const key_arg<K>& key, size_t hash) {
assert(!is_soo());
auto seq = probe(common(), hash);
slot_type* slot_ptr = slot_array();
const ctrl_t* ctrl = control();
while (true) {
Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slot_ptr + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
assert(seq.index() <= capacity() && "full table!");
}
}
inline void destroy_slots() {
assert(!is_soo());
if (PolicyTraits::template destroy_is_trivial<Alloc>()) return;
IterateOverFullSlots(
common(), slot_array(),
......@@ -3027,6 +3332,10 @@ class raw_hash_set {
inline void destructor_impl() {
if (capacity() == 0) return;
if (is_soo()) {
if (!empty()) destroy(soo_slot());
return;
}
destroy_slots();
dealloc();
}
......@@ -3036,10 +3345,16 @@ class raw_hash_set {
// This merely updates the pertinent control byte. This can be used in
// conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
assert(!is_soo());
EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
sizeof(slot_type));
}
size_t hash_of(slot_type* slot) const {
return PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slot));
}
// Resizes table to the new capacity and move all elements to the new
// positions accordingly.
//
......@@ -3050,8 +3365,23 @@ class raw_hash_set {
// can be called right after `resize`.
ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
assert(IsValidCapacity(new_capacity));
HashSetResizeHelper resize_helper(common());
auto* old_slots = slot_array();
assert(!fits_in_soo(new_capacity));
const bool was_soo = is_soo();
const bool had_soo_slot = was_soo && !empty();
const ctrl_t soo_slot_h2 =
had_soo_slot ? static_cast<ctrl_t>(H2(hash_of(soo_slot())))
: ctrl_t::kEmpty;
HashSetResizeHelper resize_helper(common(), was_soo, had_soo_slot);
// Initialize HashSetResizeHelper::old_heap_or_soo_. We can't do this in
// HashSetResizeHelper constructor because it can't transfer slots when
// transfer_uses_memcpy is false.
// TODO(b/289225379): try to handle more of the SOO cases inside
// InitializeSlots. See comment on cl/555990034 snapshot #63.
if (PolicyTraits::transfer_uses_memcpy() || !had_soo_slot) {
resize_helper.old_heap_or_soo() = common().heap_or_soo();
} else {
transfer(to_slot(resize_helper.old_soo_data()), soo_slot());
}
common().set_capacity(new_capacity);
// Note that `InitializeSlots` does different number initialization steps
// depending on the values of `transfer_uses_memcpy` and capacities.
......@@ -3060,43 +3390,60 @@ class raw_hash_set {
resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
PolicyTraits::transfer_uses_memcpy(),
alignof(slot_type)>(
common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
CharAlloc(alloc_ref()));
common(), CharAlloc(alloc_ref()), soo_slot_h2);
if (resize_helper.old_capacity() == 0) {
// In the SooEnabled() case, capacity is never 0 so we don't check.
if (!SooEnabled() && resize_helper.old_capacity() == 0) {
// InitializeSlots did all the work including infoz().RecordRehash().
return;
}
assert(resize_helper.old_capacity() > 0);
// Nothing more to do in this case.
if (was_soo && !had_soo_slot) return;
slot_type* new_slots = slot_array();
if (grow_single_group) {
if (PolicyTraits::transfer_uses_memcpy()) {
// InitializeSlots did all the work.
return;
}
// We want GrowSizeIntoSingleGroup to be called here in order to make
// InitializeSlots not depend on PolicyTraits.
resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
old_slots);
if (was_soo) {
transfer(new_slots + resize_helper.SooSlotIndex(),
to_slot(resize_helper.old_soo_data()));
return;
} else {
// We want GrowSizeIntoSingleGroup to be called here in order to make
// InitializeSlots not depend on PolicyTraits.
resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(),
alloc_ref());
}
} else {
// InitializeSlots prepares control bytes to correspond to empty table.
auto* new_slots = slot_array();
size_t total_probe_length = 0;
for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
if (IsFull(resize_helper.old_ctrl()[i])) {
size_t hash = PolicyTraits::apply(
HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
auto target = find_first_non_full(common(), hash);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
transfer(new_slots + new_i, old_slots + i);
const auto insert_slot = [&](slot_type* slot) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slot));
auto target = find_first_non_full(common(), hash);
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
transfer(new_slots + target.offset, slot);
return target.probe_length;
};
if (was_soo) {
insert_slot(to_slot(resize_helper.old_soo_data()));
return;
} else {
auto* old_slots =
reinterpret_cast<slot_type*>(resize_helper.old_slots());
size_t total_probe_length = 0;
for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
if (IsFull(resize_helper.old_ctrl()[i])) {
total_probe_length += insert_slot(old_slots + i);
}
}
infoz().RecordRehash(total_probe_length);
}
infoz().RecordRehash(total_probe_length);
}
resize_helper.DeallocateOld<alignof(slot_type)>(
CharAlloc(alloc_ref()), sizeof(slot_type),
const_cast<std::remove_const_t<slot_type>*>(old_slots));
resize_helper.DeallocateOld<alignof(slot_type)>(CharAlloc(alloc_ref()),
sizeof(slot_type));
}
// Prunes control bytes to remove as many tombstones as possible.
......@@ -3166,8 +3513,46 @@ class raw_hash_set {
}
}
// Casting directly from e.g. char* to slot_type* can cause compilation errors
// on Objective-C. This function converts to void* first, avoiding the issue.
static slot_type* to_slot(void* buf) {
return reinterpret_cast<slot_type*>(buf);
}
// Requires that lhs does not have a full SOO slot.
static void move_common(bool that_is_full_soo, allocator_type& rhs_alloc,
CommonFields& lhs, CommonFields&& rhs) {
if (PolicyTraits::transfer_uses_memcpy() || !that_is_full_soo) {
lhs = std::move(rhs);
} else {
lhs.move_non_heap_or_soo_fields(rhs);
// TODO(b/303305702): add reentrancy guard.
PolicyTraits::transfer(&rhs_alloc, to_slot(lhs.soo_data()),
to_slot(rhs.soo_data()));
}
}
// Swaps common fields making sure to avoid memcpy'ing a full SOO slot if we
// aren't allowed to do so.
void swap_common(raw_hash_set& that) {
using std::swap;
if (PolicyTraits::transfer_uses_memcpy()) {
swap(common(), that.common());
return;
}
CommonFields tmp = CommonFields::CreateDefault<SooEnabled()>();
const bool that_is_full_soo = that.is_full_soo();
move_common(that_is_full_soo, that.alloc_ref(), tmp,
std::move(that.common()));
move_common(is_full_soo(), alloc_ref(), that.common(), std::move(common()));
move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
}
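  // A toy model of the three-move swap shape above (illustrative, not Abseil
  // code); the point is that each move runs the slot type's real transfer
  // logic, which is exactly the case where memcpy-swapping a full SOO slot
  // would be incorrect:
  //
  //   struct Toy { int v = 0; };
  //   void MoveInto(Toy& lhs, Toy&& rhs) { lhs.v = rhs.v; rhs.v = 0; }
  //
  //   Toy a{1}, b{2}, tmp;
  //   MoveInto(tmp, std::move(b));  // tmp  <- that
  //   MoveInto(b, std::move(a));    // that <- *this
  //   MoveInto(a, std::move(tmp));  // *this <- tmp
  //   // now a.v == 2 && b.v == 1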
void maybe_increment_generation_or_rehash_on_move() {
common().maybe_increment_generation_on_move();
if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) {
return;
}
common().increment_generation();
if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
resize(capacity());
}
......@@ -3178,13 +3563,14 @@ class raw_hash_set {
// We don't bother checking for this/that aliasing. We just need to avoid
// breaking the invariants in that case.
destructor_impl();
common() = std::move(that.common());
move_common(that.is_full_soo(), that.alloc_ref(), common(),
std::move(that.common()));
// TODO(b/296061262): move instead of copying hash/eq/alloc.
hash_ref() = that.hash_ref();
eq_ref() = that.eq_ref();
CopyAlloc(alloc_ref(), that.alloc_ref(),
std::integral_constant<bool, propagate_alloc>());
that.common() = CommonFields{};
that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
return *this;
}
......@@ -3197,8 +3583,8 @@ class raw_hash_set {
insert(std::move(PolicyTraits::element(it.slot())));
that.destroy(it.slot());
}
that.dealloc();
that.common() = CommonFields{};
if (!that.is_soo()) that.dealloc();
that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move();
return *this;
}
......@@ -3230,6 +3616,20 @@ class raw_hash_set {
// `key`'s H2. Returns a bool indicating whether an insertion can take place.
template <class K>
std::pair<iterator, bool> find_or_prepare_insert(const K& key) {
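// In SOO mode the table holds at most one element, so insertion reduces to
// a single equality check; hashing is deferred until the table must grow.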
if (is_soo()) {
if (empty()) {
common().set_full_soo();
return {soo_iterator(), true};
}
if (PolicyTraits::apply(EqualElement<K>{key, eq_ref()},
PolicyTraits::element(soo_slot()))) {
return {soo_iterator(), false};
}
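// `key` is absent and the SOO slot is occupied: grow to the first heap
// capacity, then reserve a slot for `key` in the new backing array.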
resize(NextCapacity(SooCapacity()));
const size_t index =
PrepareInsertAfterSoo(hash_ref()(key), sizeof(slot_type), common());
return {iterator_at(index), true};
}
prefetch_heap_block();
auto hash = hash_ref()(key);
auto seq = probe(common(), hash);
......@@ -3254,6 +3654,7 @@ class raw_hash_set {
//
// REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
assert(!is_soo());
const bool rehash_for_bug_detection =
common().should_rehash_for_bug_detection_on_insert();
if (rehash_for_bug_detection) {
......@@ -3277,10 +3678,9 @@ class raw_hash_set {
} else {
target = find_first_non_full(common(), hash);
}
common().increment_size();
PrepareInsertCommon(common());
set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
common().maybe_increment_generation_on_insert();
infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
......@@ -3305,7 +3705,7 @@ class raw_hash_set {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return {control() + i, slot_array() + i, common().generation_ptr()};
return const_cast<raw_hash_set*>(this)->iterator_at(i);
}
reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
......@@ -3323,13 +3723,20 @@ class raw_hash_set {
// side-effect.
//
// See `CapacityToGrowth()`.
size_t growth_left() const { return common().growth_left(); }
void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
size_t growth_left() const {
assert(!is_soo());
return common().growth_left();
}
void set_growth_left(size_t gl) {
assert(!is_soo());
return common().set_growth_left(gl);
}
// Prefetch the heap-allocated memory region to resolve potential TLB and
// cache misses. This is intended to overlap with execution of calculating the
// hash for a key.
void prefetch_heap_block() const {
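// SOO tables have no heap block to prefetch.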
if (is_soo()) return;
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
__builtin_prefetch(control(), 0, 1);
#endif
......@@ -3338,11 +3745,43 @@ class raw_hash_set {
CommonFields& common() { return settings_.template get<0>(); }
const CommonFields& common() const { return settings_.template get<0>(); }
ctrl_t* control() const { return common().control(); }
ctrl_t* control() const {
assert(!is_soo());
return common().control();
}
slot_type* slot_array() const {
assert(!is_soo());
// Suppress erroneous uninitialized memory errors on GCC. GCC thinks that
// the call to slot_array() in find_or_prepare_insert() reads uninitialized
// memory, but slot_array() is only called there when the table is
// non-empty, and the slot memory is initialized in that case.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
return static_cast<slot_type*>(common().slot_array());
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
slot_type* soo_slot() {
assert(is_soo());
return static_cast<slot_type*>(common().soo_data());
}
const slot_type* soo_slot() const {
return const_cast<raw_hash_set*>(this)->soo_slot();
}
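// SOO iterators use a dedicated control buffer (one full control byte
// followed by a sentinel) so that incrementing past the single element
// yields end().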
iterator soo_iterator() {
return {SooControl(), soo_slot(), common().generation_ptr()};
}
const_iterator soo_iterator() const {
return const_cast<raw_hash_set*>(this)->soo_iterator();
}
HashtablezInfoHandle infoz() {
assert(!is_soo());
return common().infoz();
}
HashtablezInfoHandle infoz() { return common().infoz(); }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
......@@ -3390,7 +3829,8 @@ class raw_hash_set {
// fields that occur after CommonFields.
absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
allocator_type>
settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
settings_{CommonFields::CreateDefault<SooEnabled()>(), hasher{},
key_equal{}, allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
......@@ -3416,6 +3856,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t GetNumProbes(const Set& set,
const typename Set::key_type& key) {
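// An SOO lookup is a single comparison, so it performs zero probes.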
if (set.is_soo()) return 0;
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
auto seq = probe(set.common(), hash);
......@@ -3439,7 +3880,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity();
if (capacity == 0) return 0;
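// An SOO table owns no heap backing array; only per-slot space counts.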
size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
size_t m =
c.is_soo() ? 0 : c.common().alloc_size(sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
......
......@@ -394,7 +394,7 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
}
}
template <class T, bool kTransferable = false>
template <class T, bool kTransferable = false, bool kSoo = false>
struct ValuePolicy {
using slot_type = T;
using key_type = T;
......@@ -433,6 +433,8 @@ struct ValuePolicy {
static constexpr HashSlotFn get_hash_slot_fn() {
return nullptr;
}
static constexpr bool soo_enabled() { return kSoo; }
};
using IntPolicy = ValuePolicy<int64_t>;
......@@ -440,6 +442,44 @@ using Uint8Policy = ValuePolicy<uint8_t>;
using TransferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
// For testing SOO.
template <int N>
class SizedValue {
public:
SizedValue(int64_t v) { // NOLINT
vals_[0] = v;
}
SizedValue() : SizedValue(0) {}
SizedValue(const SizedValue&) = default;
SizedValue& operator=(const SizedValue&) = default;
int64_t operator*() const {
// Suppress erroneous uninitialized memory errors on GCC.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return vals_[0];
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
explicit operator int() const { return **this; }
explicit operator int64_t() const { return **this; }
template <typename H>
friend H AbslHashValue(H h, SizedValue sv) {
return H::combine(std::move(h), *sv);
}
bool operator==(const SizedValue& rhs) const { return **this == *rhs; }
private:
int64_t vals_[N / sizeof(int64_t)];
};
template <int N, bool kSoo>
using SizedValuePolicy =
ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
......@@ -517,9 +557,9 @@ struct StringTable
using Base::Base;
};
template <typename T, bool kTransferable = false>
template <typename T, bool kTransferable = false, bool kSoo = false>
struct ValueTable
: raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
: raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
std::equal_to<T>, std::allocator<T>> {
using Base = typename ValueTable::raw_hash_set;
using Base::Base;
......@@ -530,6 +570,11 @@ using Uint8Table = ValueTable<uint8_t>;
using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
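// Values at least as large as the heap-or-SOO buffer can never be stored
// inline, so this table is guaranteed to heap-allocate.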
constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small");
using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>;
using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() = default;
......@@ -579,6 +624,16 @@ struct FreezableAlloc : std::allocator<T> {
bool* frozen;
};
template <int N>
struct FreezableSizedValueSooTable
: raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
container_internal::hash_default_hash<SizedValue<N>>,
std::equal_to<SizedValue<N>>,
FreezableAlloc<SizedValue<N>>> {
using Base = typename FreezableSizedValueSooTable::raw_hash_set;
using Base::Base;
};
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
......@@ -649,20 +704,25 @@ TEST(Table, EmptyFunctorOptimization) {
std::equal_to<absl::string_view>, std::allocator<int>>));
}
TEST(Table, Empty) {
IntTable t;
template <class TableType>
class SooTest : public testing::Test {};
TYPED_TEST_SUITE_P(SooTest);
TYPED_TEST_P(SooTest, Empty) {
TypeParam t;
EXPECT_EQ(0, t.size());
EXPECT_TRUE(t.empty());
}
TEST(Table, LookupEmpty) {
IntTable t;
TYPED_TEST_P(SooTest, LookupEmpty) {
TypeParam t;
auto it = t.find(0);
EXPECT_TRUE(it == t.end());
}
TEST(Table, Insert1) {
IntTable t;
TYPED_TEST_P(SooTest, Insert1) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
......@@ -671,8 +731,8 @@ TEST(Table, Insert1) {
EXPECT_THAT(*t.find(0), 0);
}
TEST(Table, Insert2) {
IntTable t;
TYPED_TEST_P(SooTest, Insert2) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
......@@ -734,9 +794,9 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
TEST(Table, EraseInSmallTables) {
TYPED_TEST_P(SooTest, EraseInSmallTables) {
for (int64_t size = 0; size < 64; ++size) {
IntTable t;
TypeParam t;
for (int64_t i = 0; i < size; ++i) {
t.insert(i);
}
......@@ -751,8 +811,8 @@ TEST(Table, EraseInSmallTables) {
}
}
TEST(Table, InsertWithinCapacity) {
IntTable t;
TYPED_TEST_P(SooTest, InsertWithinCapacity) {
TypeParam t;
t.reserve(10);
const size_t original_capacity = t.capacity();
const auto addr = [&](int i) {
......@@ -841,7 +901,8 @@ TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
ResizeGrowSmallTables, ResizeReduceSmallTables);
using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
using SmallTableTypes =
::testing::Types<IntTable, TransferableIntTable, SooIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
SmallTableResizeTest, SmallTableTypes);
......@@ -863,14 +924,14 @@ TEST(Table, LazyEmplace) {
EXPECT_THAT(*it, Pair("abc", "ABC"));
}
TEST(Table, ContainsEmpty) {
IntTable t;
TYPED_TEST_P(SooTest, ContainsEmpty) {
TypeParam t;
EXPECT_FALSE(t.contains(0));
}
TEST(Table, Contains1) {
IntTable t;
TYPED_TEST_P(SooTest, Contains1) {
TypeParam t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
......@@ -880,8 +941,8 @@ TEST(Table, Contains1) {
EXPECT_FALSE(t.contains(0));
}
TEST(Table, Contains2) {
IntTable t;
TYPED_TEST_P(SooTest, Contains2) {
TypeParam t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
......@@ -1178,8 +1239,8 @@ TEST(Table, RehashWithNoResize) {
}
}
TEST(Table, InsertEraseStressTest) {
IntTable t;
TYPED_TEST_P(SooTest, InsertEraseStressTest) {
TypeParam t;
const size_t kMinElementCount = 250;
std::deque<int> keys;
size_t i = 0;
......@@ -1207,32 +1268,33 @@ TEST(Table, InsertOverloads) {
Pair("DEF", "!!!")));
}
TEST(Table, LargeTable) {
IntTable t;
TYPED_TEST_P(SooTest, LargeTable) {
TypeParam t;
for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
for (int64_t i = 0; i != 100000; ++i)
ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
}
// Timeout if copy is quadratic as it was in Rust.
TEST(Table, EnsureNonQuadraticAsInRust) {
TYPED_TEST_P(SooTest, EnsureNonQuadraticAsInRust) {
static const size_t kLargeSize = 1 << 15;
IntTable t;
TypeParam t;
for (size_t i = 0; i != kLargeSize; ++i) {
t.insert(i);
}
// If this is quadratic, the test will timeout.
IntTable t2;
TypeParam t2;
for (const auto& entry : t) t2.insert(entry);
}
TEST(Table, ClearBug) {
TYPED_TEST_P(SooTest, ClearBug) {
if (SwisstableGenerationsEnabled()) {
GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
}
IntTable t;
TypeParam t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2 + 1;
for (size_t i = 0; i < max_size; ++i) {
......@@ -1251,11 +1313,11 @@ TEST(Table, ClearBug) {
// that they are probably still in the same group. This is not strictly
// guaranteed.
EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
capacity * sizeof(IntTable::value_type));
capacity * sizeof(typename TypeParam::value_type));
}
TEST(Table, Erase) {
IntTable t;
TYPED_TEST_P(SooTest, Erase) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
......@@ -1265,8 +1327,8 @@ TEST(Table, Erase) {
EXPECT_TRUE(t.find(0) == t.end());
}
TEST(Table, EraseMaintainsValidIterator) {
IntTable t;
TYPED_TEST_P(SooTest, EraseMaintainsValidIterator) {
TypeParam t;
const int kNumElements = 100;
for (int i = 0; i < kNumElements; i++) {
EXPECT_TRUE(t.emplace(i).second);
......@@ -1284,8 +1346,8 @@ TEST(Table, EraseMaintainsValidIterator) {
EXPECT_EQ(num_erase_calls, kNumElements);
}
TEST(Table, EraseBeginEnd) {
IntTable t;
TYPED_TEST_P(SooTest, EraseBeginEnd) {
TypeParam t;
for (int i = 0; i < 10; ++i) t.insert(i);
EXPECT_EQ(t.size(), 10);
t.erase(t.begin(), t.end());
......@@ -1684,8 +1746,8 @@ TEST(Table, EraseInsertProbing) {
EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}
TEST(Table, Clear) {
IntTable t;
TYPED_TEST_P(SooTest, Clear) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
t.clear();
EXPECT_TRUE(t.find(0) == t.end());
......@@ -1697,13 +1759,13 @@ TEST(Table, Clear) {
EXPECT_TRUE(t.find(0) == t.end());
}
TEST(Table, Swap) {
IntTable t;
TYPED_TEST_P(SooTest, Swap) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_EQ(1, t.size());
IntTable u;
TypeParam u;
t.swap(u);
EXPECT_EQ(0, t.size());
EXPECT_EQ(1, u.size());
......@@ -1711,8 +1773,8 @@ TEST(Table, Swap) {
EXPECT_THAT(*u.find(0), 0);
}
TEST(Table, Rehash) {
IntTable t;
TYPED_TEST_P(SooTest, Rehash) {
TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
t.emplace(0);
t.emplace(1);
......@@ -1723,8 +1785,8 @@ TEST(Table, Rehash) {
EXPECT_THAT(*t.find(1), 1);
}
TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
IntTable t;
TYPED_TEST_P(SooTest, RehashDoesNotRehashWhenNotNecessary) {
TypeParam t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
......@@ -1732,14 +1794,15 @@ TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
EXPECT_EQ(p, &*t.find(0));
}
// The following two tests use a non-SOO table because they test for 0 capacity.
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
IntTable t;
NonSooIntTable t;
t.rehash(0);
EXPECT_EQ(0, t.bucket_count());
}
TEST(Table, RehashZeroDeallocatesEmptyTable) {
IntTable t;
NonSooIntTable t;
t.emplace(0);
t.clear();
EXPECT_NE(0, t.bucket_count());
......@@ -1747,8 +1810,8 @@ TEST(Table, RehashZeroDeallocatesEmptyTable) {
EXPECT_EQ(0, t.bucket_count());
}
TEST(Table, RehashZeroForcesRehash) {
IntTable t;
TYPED_TEST_P(SooTest, RehashZeroForcesRehash) {
TypeParam t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
......@@ -1764,33 +1827,33 @@ TEST(Table, ConstructFromInitList) {
StringTable t = {P(), Q(), {}, {{}, {}}};
}
TEST(Table, CopyConstruct) {
IntTable t;
TYPED_TEST_P(SooTest, CopyConstruct) {
TypeParam t;
t.emplace(0);
EXPECT_EQ(1, t.size());
{
IntTable u(t);
TypeParam u(t);
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u{t};
TypeParam u{t};
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u = t;
TypeParam u = t;
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
}
TEST(Table, CopyDifferentSizes) {
IntTable t;
TYPED_TEST_P(SooTest, CopyDifferentSizes) {
TypeParam t;
for (int i = 0; i < 100; ++i) {
t.emplace(i);
IntTable c = t;
TypeParam c = t;
for (int j = 0; j <= i; ++j) {
ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
}
......@@ -1799,16 +1862,16 @@ TEST(Table, CopyDifferentSizes) {
}
}
TEST(Table, CopyDifferentCapacities) {
TYPED_TEST_P(SooTest, CopyDifferentCapacities) {
for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
IntTable t;
TypeParam t;
t.reserve(static_cast<size_t>(cap));
for (int i = 0; i <= cap; ++i) {
t.emplace(i);
if (i != cap && i % 5 != 0) {
continue;
}
IntTable c = t;
TypeParam c = t;
for (int j = 0; j <= i; ++j) {
ASSERT_TRUE(c.find(j) != c.end())
<< "cap=" << cap << " i=" << i << " j=" << j;
......@@ -1948,8 +2011,8 @@ TEST(Table, Equality3) {
EXPECT_NE(u, t);
}
TEST(Table, NumDeletedRegression) {
IntTable t;
TYPED_TEST_P(SooTest, NumDeletedRegression) {
TypeParam t;
t.emplace(0);
t.erase(t.find(0));
// construct over a deleted slot.
......@@ -1957,8 +2020,8 @@ TEST(Table, NumDeletedRegression) {
t.clear();
}
TEST(Table, FindFullDeletedRegression) {
IntTable t;
TYPED_TEST_P(SooTest, FindFullDeletedRegression) {
TypeParam t;
for (int i = 0; i < 1000; ++i) {
t.emplace(i);
t.erase(t.find(i));
......@@ -1966,17 +2029,17 @@ TEST(Table, FindFullDeletedRegression) {
EXPECT_EQ(0, t.size());
}
TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
TYPED_TEST_P(SooTest, ReplacingDeletedSlotDoesNotRehash) {
size_t n;
{
// Compute n such that n is the maximum number of elements before rehash.
IntTable t;
TypeParam t;
t.emplace(0);
size_t c = t.bucket_count();
for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
--n;
}
IntTable t;
TypeParam t;
t.rehash(n);
const size_t c = t.bucket_count();
for (size_t i = 0; i != n; ++i) t.emplace(i);
......@@ -2227,8 +2290,8 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
}
TEST(Nodes, HintInsert) {
IntTable t = {1, 2, 3};
TYPED_TEST_P(SooTest, HintInsert) {
TypeParam t = {1, 2, 3};
auto node = t.extract(1);
EXPECT_THAT(t, UnorderedElementsAre(2, 3));
auto it = t.insert(t.begin(), std::move(node));
......@@ -2247,14 +2310,18 @@ TEST(Nodes, HintInsert) {
EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move)
}
IntTable MakeSimpleTable(size_t size) {
IntTable t;
template <typename T>
T MakeSimpleTable(size_t size) {
T t;
while (t.size() < size) t.insert(t.size());
return t;
}
std::vector<int> OrderOfIteration(const IntTable& t) {
return {t.begin(), t.end()};
template <typename T>
std::vector<int> OrderOfIteration(const T& t) {
std::vector<int> res;
for (auto i : t) res.push_back(static_cast<int>(i));
return res;
}
// These IterationOrderChanges tests depend on non-deterministic behavior.
......@@ -2263,15 +2330,15 @@ std::vector<int> OrderOfIteration(const IntTable& t) {
// we are touching different memory pages to cause the ordering to change.
// We also need to keep the old tables around to avoid getting the same memory
// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
TYPED_TEST_P(SooTest, IterationOrderChangesByInstance) {
for (size_t size : {2, 6, 12, 20}) {
const auto reference_table = MakeSimpleTable(size);
const auto reference_table = MakeSimpleTable<TypeParam>(size);
const auto reference = OrderOfIteration(reference_table);
std::vector<IntTable> tables;
std::vector<TypeParam> tables;
bool found_difference = false;
for (int i = 0; !found_difference && i < 5000; ++i) {
tables.push_back(MakeSimpleTable(size));
tables.push_back(MakeSimpleTable<TypeParam>(size));
found_difference = OrderOfIteration(tables.back()) != reference;
}
if (!found_difference) {
......@@ -2282,7 +2349,7 @@ TEST(Table, IterationOrderChangesByInstance) {
}
}
TEST(Table, IterationOrderChangesOnRehash) {
TYPED_TEST_P(SooTest, IterationOrderChangesOnRehash) {
// We test different sizes with many small numbers, because small table
// resize has a different codepath.
// Note: iteration order for size() <= 1 is always the same.
......@@ -2291,10 +2358,10 @@ TEST(Table, IterationOrderChangesOnRehash) {
size_t{0},  // Forced rehash is guaranteed.
size * 10  // Rehash to a larger capacity is guaranteed.
}) {
std::vector<IntTable> garbage;
std::vector<TypeParam> garbage;
bool ok = false;
for (int i = 0; i < 5000; ++i) {
auto t = MakeSimpleTable(size);
auto t = MakeSimpleTable<TypeParam>(size);
const auto reference = OrderOfIteration(t);
// Force rehash.
t.rehash(rehash_size);
......@@ -2315,8 +2382,8 @@ TEST(Table, IterationOrderChangesOnRehash) {
// Verify that pointers are invalidated as soon as a second element is inserted.
// This prevents dependency on pointer stability on small tables.
TEST(Table, UnstablePointers) {
IntTable table;
TYPED_TEST_P(SooTest, UnstablePointers) {
TypeParam table;
const auto addr = [&](int i) {
return reinterpret_cast<uintptr_t>(&*table.find(i));
......@@ -2335,11 +2402,11 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
NonSooIntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
"erase.* called on end.. iterator.");
typename IntTable::iterator iter;
typename NonSooIntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
++iter, "operator.* called on default-constructed iterator.");
t.insert(0);
......@@ -2353,6 +2420,22 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
SooIntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
"erase.* called on end.. iterator.");
typename SooIntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
++iter, "operator.* called on default-constructed iterator.");
// We can't detect the erased-iterator case as invalid in SOO mode because
// the SOO control bytes are a shared static constant.
}
// Invalid iterator use can trigger use-after-free in asan/hwasan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
constexpr const char* kInvalidIteratorDeathMessage =
......@@ -2366,11 +2449,11 @@ constexpr bool kMsvc = true;
constexpr bool kMsvc = false;
#endif
TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperator) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
TypeParam t;
t.insert(1);
t.insert(2);
t.insert(3);
......@@ -2389,35 +2472,50 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter2);
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
IntTable t1, t2;
TypeParam t1, t2;
t1.insert(0);
t2.insert(0);
iter1 = t1.begin();
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
SwisstableGenerationsEnabled()
? "Invalid iterator comparison.*iterators from different hashtables"
? "Invalid iterator comparison.*iterators from different.* hashtables"
: "Invalid iterator comparison.*may be from different "
".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
}
TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
#ifdef ABSL_HAVE_THREAD_SANITIZER
GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
"EXPECT_DEATH.";
#endif
for (int i = 0; i < 10; ++i) t1.insert(i);
// There should have been a rehash in t1.
if (kMsvc) return; // MSVC doesn't support | in regex.
TypeParam t;
t.insert(0);
auto iter = t.begin();
// Trigger a rehash in t.
for (int i = 0; i < 10; ++i) t.insert(i);
// NOTE(b/293887834): After rehashing, iterators will contain pointers to
// freed memory, which may be detected by ThreadSanitizer.
const char* const kRehashedDeathMessage =
SwisstableGenerationsEnabled()
? kInvalidIteratorDeathMessage
: "Invalid iterator comparison.*might have rehashed.*config=asan"
"|ThreadSanitizer: heap-use-after-free";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
: "Invalid iterator comparison.*might have rehashed.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
template <typename T>
class RawHashSamplerTest : public testing::Test {};
TYPED_TEST_SUITE_P(RawHashSamplerTest);
TYPED_TEST_P(RawHashSamplerTest, Sample) {
constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value;
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
......@@ -2430,7 +2528,7 @@ TEST(RawHashSamplerTest, Sample) {
++start_size;
});
std::vector<IntTable> tables;
std::vector<TypeParam> tables;
for (int i = 0; i < 1000000; ++i) {
tables.emplace_back();
......@@ -2459,15 +2557,20 @@ TEST(RawHashSamplerTest, Sample) {
std::memory_order_relaxed)]++;
reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
++end_size;
});
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
EXPECT_EQ(observed_checksums.size(), 5);
for (const auto& [_, count] : observed_checksums) {
EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
if (soo_enabled) {
EXPECT_EQ(observed_checksums.size(), 9);
} else {
EXPECT_EQ(observed_checksums.size(), 5);
for (const auto& [_, count] : observed_checksums) {
EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2,
0.05);
}
}
EXPECT_EQ(reservations.size(), 10);
......@@ -2479,6 +2582,10 @@ TEST(RawHashSamplerTest, Sample) {
<< reservation;
}
}
REGISTER_TYPED_TEST_SUITE_P(RawHashSamplerTest, Sample);
using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, RawHashSamplerTest, RawHashSamplerTestTypes);
#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
......@@ -2531,9 +2638,10 @@ using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
SanitizerTableTypes);
// TODO(b/289225379): poison inline space when empty SOO.
TEST(Sanitizer, PoisoningOnErase) {
IntTable t;
int64_t& v = *t.insert(0).first;
NonSooIntTable t;
auto& v = *t.insert(0).first;
EXPECT_FALSE(__asan_address_is_poisoned(&v));
t.erase(0);
......@@ -2581,7 +2689,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
NonSooIntTable t;
// Start with 1 element so that `it` is never an end iterator.
t.insert(-1);
for (int i = 0; i < 10; ++i) {
......@@ -2628,11 +2736,11 @@ TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t1, t2;
NonSooIntTable t1, t2;
t1.insert(1);
auto it = t1.begin();
// ptr will become invalidated on rehash.
const int64_t* ptr = &*it;
const auto* ptr = &*it;
(void)ptr;
t2 = std::move(t1);
......@@ -2640,12 +2748,12 @@ TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
kInvalidIteratorDeathMessage);
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
#endif
}
TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
IntTable t;
TYPED_TEST_P(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
TypeParam t;
for (int i = 0; i < 8; ++i) t.insert(i);
// Want to insert twice without invalidating iterators so reserve.
const size_t cap = t.capacity();
......@@ -2687,7 +2795,7 @@ TEST(Table, GenerationInfoResetsOnClear) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
NonSooIntTable t;
for (int i = 0; i < 1000; ++i) t.insert(i);
t.reserve(t.size() + 100);
......@@ -2705,24 +2813,24 @@ TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
#endif
IntTable t;
NonSooIntTable t;
t.insert(0);
// Rehashing is guaranteed on every insertion while capacity is less than
// RehashProbabilityConstant().
int64_t i = 0;
int i = 0;
while (t.capacity() <= RehashProbabilityConstant()) {
// ptr will become invalidated on rehash.
const int64_t* ptr = &*t.begin();
const auto* ptr = &*t.begin();
t.insert(++i);
EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
}
}
TEST(Iterator, InvalidComparisonDifferentTables) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
IntTable t1, t2;
IntTable::iterator default_constructed_iter;
NonSooIntTable t1, t2;
NonSooIntTable::iterator default_constructed_iter;
// We randomly use one of N empty generations for generations from empty
// hashtables. In general, we won't always detect when iterators from
// different empty hashtables are compared, but in this test case, we
......@@ -2813,10 +2921,11 @@ TEST(Table, CountedHash) {
}
}
// IterateOverFullSlots doesn't support SOO.
TEST(Table, IterateOverFullSlotsEmpty) {
IntTable t;
auto fail_if_any = [](const ctrl_t*, int64_t* i) {
FAIL() << "expected no slots " << i;
NonSooIntTable t;
auto fail_if_any = [](const ctrl_t*, auto* i) {
FAIL() << "expected no slots " << **i;
};
container_internal::IterateOverFullSlots(
RawHashSetTestOnlyAccess::GetCommon(t),
......@@ -2830,7 +2939,7 @@ TEST(Table, IterateOverFullSlotsEmpty) {
}
TEST(Table, IterateOverFullSlotsFull) {
IntTable t;
NonSooIntTable t;
std::vector<int64_t> expected_slots;
for (int64_t i = 0; i < 128; ++i) {
......@@ -2841,17 +2950,106 @@ TEST(Table, IterateOverFullSlotsFull) {
container_internal::IterateOverFullSlots(
RawHashSetTestOnlyAccess::GetCommon(t),
RawHashSetTestOnlyAccess::GetSlots(t),
[&t, &slots](const ctrl_t* ctrl, int64_t* i) {
[&t, &slots](const ctrl_t* ctrl, auto* i) {
ptrdiff_t ctrl_offset =
ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
ASSERT_EQ(ctrl_offset, slot_offset);
slots.push_back(*i);
slots.push_back(**i);
});
EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
}
}
template <typename T>
class SooTable : public testing::Test {};
TYPED_TEST_SUITE_P(SooTable);
TYPED_TEST_P(SooTable, Basic) {
bool frozen = true;
TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
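// FreezableAlloc fails the test if the table allocates while `frozen` is
// true, so every operation below must stay within the SOO representation.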
if (t.capacity() != SooCapacity()) {
EXPECT_LT(sizeof(void*), 8);
GTEST_SKIP() << "not SOO on this platform";
}
t.insert(0);
EXPECT_EQ(t.capacity(), 1);
auto it = t.find(0);
EXPECT_EQ(it, t.begin());
ASSERT_NE(it, t.end());
EXPECT_EQ(*it, 0);
EXPECT_EQ(++it, t.end());
EXPECT_EQ(t.find(1), t.end());
EXPECT_EQ(t.size(), 1);
t.erase(0);
EXPECT_EQ(t.size(), 0);
t.insert(1);
it = t.find(1);
EXPECT_EQ(it, t.begin());
ASSERT_NE(it, t.end());
EXPECT_EQ(*it, 1);
t.clear();
EXPECT_EQ(t.size(), 0);
}
REGISTER_TYPED_TEST_SUITE_P(SooTable, Basic);
using FreezableSooTableTypes =
::testing::Types<FreezableSizedValueSooTable<8>,
FreezableSizedValueSooTable<16>>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTable, FreezableSooTableTypes);
TEST(Table, RehashToSoo) {
SooIntTable t;
if (t.capacity() != SooCapacity()) {
EXPECT_LT(sizeof(void*), 8);
GTEST_SKIP() << "not SOO on this platform";
}
t.reserve(10);
t.insert(0);
EXPECT_EQ(*t.begin(), 0);
t.rehash(0); // Rehash back down to SOO table.
EXPECT_EQ(t.capacity(), SooCapacity());
EXPECT_EQ(t.size(), 1);
EXPECT_EQ(*t.begin(), 0);
EXPECT_EQ(t.find(0), t.begin());
EXPECT_EQ(t.find(1), t.end());
}
TEST(Table, ResizeToNonSoo) {
for (int reserve_capacity : {8, 100000}) {
SooIntTable t;
t.insert(0);
t.reserve(reserve_capacity);
EXPECT_EQ(t.find(0), t.begin());
EXPECT_EQ(t.size(), 1);
EXPECT_EQ(*t.begin(), 0);
EXPECT_EQ(t.find(1), t.end());
}
}
REGISTER_TYPED_TEST_SUITE_P(
SooTest, Empty, Clear, ClearBug, Contains1, Contains2, ContainsEmpty,
CopyConstruct, CopyDifferentCapacities, CopyDifferentSizes,
EnsureNonQuadraticAsInRust, Erase, EraseBeginEnd, EraseInSmallTables,
EraseMaintainsValidIterator, FindFullDeletedRegression, HintInsert, Insert1,
Insert2, InsertEraseStressTest, InsertWithinCapacity,
IterationOrderChangesByInstance, IterationOrderChangesOnRehash,
IteratorInvalidAssertsEqualityOperator,
IteratorInvalidAssertsEqualityOperatorRehash, LargeTable, LookupEmpty,
NumDeletedRegression, Rehash, RehashDoesNotRehashWhenNotNecessary,
RehashZeroForcesRehash, ReplacingDeletedSlotDoesNotRehash,
ReservedGrowthUpdatesWhenTableDoesntGrow, Swap, UnstablePointers);
using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTest, SooTableTypes);
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
......
......@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstddef>
#include <unordered_set>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
......@@ -38,15 +43,16 @@ void TestInlineElementSize(
// set cannot be flat_hash_set, however, since that would introduce a mutex
// deadlock.
std::unordered_set<const HashtablezInfo*>& preexisting_info, // NOLINT
std::vector<Table>& tables, const typename Table::value_type& elt,
std::vector<Table>& tables,
const std::vector<typename Table::value_type>& values,
size_t expected_element_size) {
for (int i = 0; i < 10; ++i) {
// We create a new table and must store it somewhere so that, when we store
// a pointer to the resulting `HashtablezInfo` into `preexisting_info`, we
// aren't storing a dangling pointer.
tables.emplace_back();
// We must insert an element to get a hashtablez to instantiate.
tables.back().insert(elt);
// We must insert elements to get a hashtablez to instantiate.
tables.back().insert(values.begin(), values.end());
}
size_t new_count = 0;
sampler.Iterate([&](const HashtablezInfo& info) {
......@@ -82,6 +88,9 @@ TEST(FlatHashMap, SampleElementSize) {
std::vector<flat_hash_set<bigstruct>> flat_set_tables;
std::vector<node_hash_map<int, bigstruct>> node_map_tables;
std::vector<node_hash_set<bigstruct>> node_set_tables;
std::vector<bigstruct> set_values = {bigstruct{{0}}, bigstruct{{1}}};
std::vector<std::pair<const int, bigstruct>> map_values = {{0, bigstruct{}},
{1, bigstruct{}}};
// It takes thousands of new tables after changing the sampling parameters
// before you actually get some instrumentation. And if you must actually
......@@ -97,14 +106,14 @@ TEST(FlatHashMap, SampleElementSize) {
std::unordered_set<const HashtablezInfo*> preexisting_info; // NOLINT
sampler.Iterate(
[&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
{0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_map_tables,
{0, bigstruct{}}, sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_set_tables, //
bigstruct{}, sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_set_tables, //
bigstruct{}, sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_map_tables, map_values,
sizeof(int) + sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_map_tables, map_values,
sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_set_tables, set_values,
sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_set_tables, set_values,
sizeof(void*));
#endif
}
......