Commit 8f241e77 by Evan Brown Committed by Copybara-Service

Store infoz on the heap instead of inline and store it only when we are sampling the current allocation.

Store infoz on the heap instead of inline and store it only when we are sampling the current allocation.

PiperOrigin-RevId: 553847957
Change-Id: Idd131d0362bf36bd22d9bd20df54bd9ae50f0e28
parent 039d70f6
...@@ -137,18 +137,7 @@ class HashtablezInfoHandle { ...@@ -137,18 +137,7 @@ class HashtablezInfoHandle {
UnsampleSlow(info_); UnsampleSlow(info_);
} }
HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; inline bool IsSampled() const { return ABSL_PREDICT_FALSE(info_ != nullptr); }
HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
: info_(absl::exchange(o.info_, nullptr)) {}
HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
UnsampleSlow(info_);
}
info_ = absl::exchange(o.info_, nullptr);
return *this;
}
inline void RecordStorageChanged(size_t size, size_t capacity) { inline void RecordStorageChanged(size_t size, size_t capacity) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
...@@ -198,6 +187,7 @@ class HashtablezInfoHandle { ...@@ -198,6 +187,7 @@ class HashtablezInfoHandle {
explicit HashtablezInfoHandle(std::nullptr_t) {} explicit HashtablezInfoHandle(std::nullptr_t) {}
inline void Unregister() {} inline void Unregister() {}
inline bool IsSampled() const { return false; }
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {} inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordReservation(size_t /*target_capacity*/) {} inline void RecordReservation(size_t /*target_capacity*/) {}
......
...@@ -42,16 +42,11 @@ namespace container_internal { ...@@ -42,16 +42,11 @@ namespace container_internal {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer { class HashtablezInfoHandlePeer {
public: public:
static bool IsSampled(const HashtablezInfoHandle& h) {
return h.info_ != nullptr;
}
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
}; };
#else #else
class HashtablezInfoHandlePeer { class HashtablezInfoHandlePeer {
public: public:
static bool IsSampled(const HashtablezInfoHandle&) { return false; }
static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; } static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
}; };
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
...@@ -267,7 +262,7 @@ TEST(HashtablezSamplerTest, Sample) { ...@@ -267,7 +262,7 @@ TEST(HashtablezSamplerTest, Sample) {
for (int i = 0; i < 1000000; ++i) { for (int i = 0; i < 1000000; ++i) {
HashtablezInfoHandle h = Sample(test_element_size); HashtablezInfoHandle h = Sample(test_element_size);
++total; ++total;
if (HashtablezInfoHandlePeer::IsSampled(h)) { if (h.IsSampled()) {
++num_sampled; ++num_sampled;
} }
sample_rate = static_cast<double>(num_sampled) / total; sample_rate = static_cast<double>(num_sampled) / total;
...@@ -294,6 +289,7 @@ TEST(HashtablezSamplerTest, Handle) { ...@@ -294,6 +289,7 @@ TEST(HashtablezSamplerTest, Handle) {
}); });
EXPECT_TRUE(found); EXPECT_TRUE(found);
h.Unregister();
h = HashtablezInfoHandle(); h = HashtablezInfoHandle();
found = false; found = false;
sampler.Iterate([&](const HashtablezInfo& h) { sampler.Iterate([&](const HashtablezInfo& h) {
......
...@@ -220,7 +220,7 @@ void DropDeletesWithoutResize(CommonFields& common, ...@@ -220,7 +220,7 @@ void DropDeletesWithoutResize(CommonFields& common,
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) { void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
assert(IsFull(*it) && "erasing a dangling iterator"); assert(IsFull(*it) && "erasing a dangling iterator");
c.set_size(c.size() - 1); c.decrement_size();
const auto index = static_cast<size_t>(it - c.control()); const auto index = static_cast<size_t>(it - c.control());
const size_t index_before = (index - Group::kWidth) & c.capacity(); const size_t index_before = (index - Group::kWidth) & c.capacity();
const auto empty_after = Group(it).MaskEmpty(); const auto empty_after = Group(it).MaskEmpty();
...@@ -247,14 +247,15 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, ...@@ -247,14 +247,15 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
ResetCtrl(c, policy.slot_size); ResetCtrl(c, policy.slot_size);
c.infoz().RecordStorageChanged(0, c.capacity()); c.infoz().RecordStorageChanged(0, c.capacity());
} else { } else {
// We need to record infoz before calling dealloc, which will unregister
// infoz.
c.infoz().RecordClearedReservation();
c.infoz().RecordStorageChanged(0, 0);
(*policy.dealloc)(c, policy); (*policy.dealloc)(c, policy);
c.set_control(EmptyGroup()); c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration()); c.set_generation_ptr(EmptyGeneration());
c.set_slots(nullptr); c.set_slots(nullptr);
c.set_capacity(0); c.set_capacity(0);
c.infoz().RecordClearedReservation();
assert(c.size() == 0);
c.infoz().RecordStorageChanged(0, 0);
} }
} }
......
...@@ -62,6 +62,9 @@ ...@@ -62,6 +62,9 @@
// pseudo-struct: // pseudo-struct:
// //
// struct BackingArray { // struct BackingArray {
// // Sampling handler. This field isn't present when the sampling is
// // disabled or this allocation hasn't been selected for sampling.
// HashtablezInfoHandle infoz_;
// // The number of elements we can insert before growing the capacity. // // The number of elements we can insert before growing the capacity.
// size_t growth_left; // size_t growth_left;
// // Control bytes for the "real" slots. // // Control bytes for the "real" slots.
...@@ -175,6 +178,7 @@ ...@@ -175,6 +178,7 @@
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#include <algorithm> #include <algorithm>
#include <cassert>
#include <cmath> #include <cmath>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
...@@ -912,9 +916,11 @@ using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled; ...@@ -912,9 +916,11 @@ using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
// A valid capacity is a non-zero integer `2^m - 1`. // A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// Computes the offset from the start of the backing allocation of the control // Computes the offset from the start of the backing allocation of control.
// bytes. growth_left is stored at the beginning of the backing array. // infoz and growth_left are stored at the beginning of the backing array.
inline size_t ControlOffset() { return sizeof(size_t); } inline size_t ControlOffset(bool has_infoz) {
return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
}
// Returns the number of "cloned control bytes". // Returns the number of "cloned control bytes".
// //
...@@ -925,24 +931,26 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } ...@@ -925,24 +931,26 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
// Given the capacity of a table, computes the offset (from the start of the // Given the capacity of a table, computes the offset (from the start of the
// backing allocation) of the generation counter (if it exists). // backing allocation) of the generation counter (if it exists).
inline size_t GenerationOffset(size_t capacity) { inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
assert(IsValidCapacity(capacity)); assert(IsValidCapacity(capacity));
const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
return ControlOffset() + num_control_bytes; return ControlOffset(has_infoz) + num_control_bytes;
} }
// Given the capacity of a table, computes the offset (from the start of the // Given the capacity of a table, computes the offset (from the start of the
// backing allocation) at which the slots begin. // backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align) { inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
assert(IsValidCapacity(capacity)); assert(IsValidCapacity(capacity));
return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) & return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
slot_align - 1) &
(~slot_align + 1); (~slot_align + 1);
} }
// Given the capacity of a table, computes the total size of the backing // Given the capacity of a table, computes the total size of the backing
// array. // array.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
return SlotOffset(capacity, slot_align) + capacity * slot_size; bool has_infoz) {
return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
} }
// CommonFields hold the fields in raw_hash_set that do not depend // CommonFields hold the fields in raw_hash_set that do not depend
...@@ -965,20 +973,20 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -965,20 +973,20 @@ class CommonFields : public CommonFieldsGenerationInfo {
control_(that.control()), control_(that.control()),
slots_(that.slot_array()), slots_(that.slot_array()),
capacity_(that.capacity()), capacity_(that.capacity()),
compressed_tuple_(that.size(), std::move(that.infoz())) { size_(that.size_) {
that.set_control(EmptyGroup()); that.set_control(EmptyGroup());
that.set_slots(nullptr); that.set_slots(nullptr);
that.set_capacity(0); that.set_capacity(0);
that.set_size(0); that.size_ = 0;
} }
CommonFields& operator=(CommonFields&&) = default; CommonFields& operator=(CommonFields&&) = default;
ctrl_t* control() const { return control_; } ctrl_t* control() const { return control_; }
void set_control(ctrl_t* c) { control_ = c; } void set_control(ctrl_t* c) { control_ = c; }
void* backing_array_start() const { void* backing_array_start() const {
// growth_left is stored before control bytes. // growth_left (and maybe infoz) is stored before control bytes.
assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0); assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
return control() - sizeof(size_t); return control() - ControlOffset(has_infoz());
} }
// Note: we can't use slots() because Qt defines "slots" as a macro. // Note: we can't use slots() because Qt defines "slots" as a macro.
...@@ -986,8 +994,18 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -986,8 +994,18 @@ class CommonFields : public CommonFieldsGenerationInfo {
void set_slots(void* s) { slots_ = s; } void set_slots(void* s) { slots_ = s; }
// The number of filled slots. // The number of filled slots.
size_t size() const { return compressed_tuple_.template get<0>(); } size_t size() const { return size_ >> HasInfozShift(); }
void set_size(size_t s) { compressed_tuple_.template get<0>() = s; } void set_size(size_t s) {
size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
}
void increment_size() {
assert(size() < capacity());
size_ += size_t{1} << HasInfozShift();
}
void decrement_size() {
assert(size() > 0);
size_ -= size_t{1} << HasInfozShift();
}
// The total number of available slots. // The total number of available slots.
size_t capacity() const { return capacity_; } size_t capacity() const { return capacity_; }
...@@ -999,15 +1017,31 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -999,15 +1017,31 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The number of slots we can still fill without needing to rehash. // The number of slots we can still fill without needing to rehash.
// This is stored in the heap allocation before the control bytes. // This is stored in the heap allocation before the control bytes.
size_t growth_left() const { size_t growth_left() const {
return *reinterpret_cast<size_t*>(backing_array_start()); const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
return *gl_ptr;
} }
void set_growth_left(size_t gl) { void set_growth_left(size_t gl) {
*reinterpret_cast<size_t*>(backing_array_start()) = gl; size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
*gl_ptr = gl;
} }
HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); } bool has_infoz() const {
const HashtablezInfoHandle& infoz() const { return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
return compressed_tuple_.template get<1>(); }
void set_has_infoz(bool has_infoz) {
size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
}
HashtablezInfoHandle infoz() {
return has_infoz()
? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
: HashtablezInfoHandle();
}
void set_infoz(HashtablezInfoHandle infoz) {
assert(has_infoz());
*reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
} }
bool should_rehash_for_bug_detection_on_insert() const { bool should_rehash_for_bug_detection_on_insert() const {
...@@ -1020,7 +1054,7 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -1020,7 +1054,7 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The size of the backing array allocation. // The size of the backing array allocation.
size_t alloc_size(size_t slot_size, size_t slot_align) const { size_t alloc_size(size_t slot_size, size_t slot_align) const {
return AllocSize(capacity(), slot_size, slot_align); return AllocSize(capacity(), slot_size, slot_align, has_infoz());
} }
// Returns the number of control bytes set to kDeleted. For testing only. // Returns the number of control bytes set to kDeleted. For testing only.
...@@ -1030,6 +1064,12 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -1030,6 +1064,12 @@ class CommonFields : public CommonFieldsGenerationInfo {
} }
private: private:
// We store the has_infoz bit in the lowest bit of size_.
static constexpr size_t HasInfozShift() { return 1; }
static constexpr size_t HasInfozMask() {
return (size_t{1} << HasInfozShift()) - 1;
}
// TODO(b/182800944): Investigate removing some of these fields: // TODO(b/182800944): Investigate removing some of these fields:
// - control/slots can be derived from each other // - control/slots can be derived from each other
...@@ -1054,10 +1094,8 @@ class CommonFields : public CommonFieldsGenerationInfo { ...@@ -1054,10 +1094,8 @@ class CommonFields : public CommonFieldsGenerationInfo {
// regressions, presumably because we need capacity to do find operations. // regressions, presumably because we need capacity to do find operations.
size_t capacity_ = 0; size_t capacity_ = 0;
// Bundle together size and HashtablezInfoHandle to ensure EBO for // The size and also has one bit that stores whether we have infoz.
// HashtablezInfoHandle when sampling is turned off. size_t size_ = 0;
absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
compressed_tuple_{0u, HashtablezInfoHandle{}};
}; };
template <class Policy, class Hash, class Eq, class Alloc> template <class Policy, class Hash, class Eq, class Alloc>
...@@ -1407,23 +1445,26 @@ ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) { ...@@ -1407,23 +1445,26 @@ ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
c.slot_array() == nullptr) c.slot_array() == nullptr)
? SizeOfSlot ? SizeOfSlot
: 0; : 0;
HashtablezInfoHandle infoz =
sample_size > 0 ? Sample(sample_size) : c.infoz();
const bool has_infoz = infoz.IsSampled();
const size_t cap = c.capacity(); const size_t cap = c.capacity();
const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot); const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
// growth_left (which is a size_t) is stored with the backing array.
char* mem = static_cast<char*>( char* mem = static_cast<char*>(
Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size)); Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
const GenerationType old_generation = c.generation(); const GenerationType old_generation = c.generation();
c.set_generation_ptr( c.set_generation_ptr(reinterpret_cast<GenerationType*>(
reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap))); mem + GenerationOffset(cap, has_infoz)));
c.set_generation(NextGeneration(old_generation)); c.set_generation(NextGeneration(old_generation));
c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset())); c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
c.set_slots(mem + SlotOffset(cap, AlignOfSlot)); c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
ResetCtrl(c, SizeOfSlot); ResetCtrl(c, SizeOfSlot);
if (sample_size) { c.set_has_infoz(has_infoz);
c.infoz() = Sample(sample_size); if (has_infoz) {
infoz.RecordStorageChanged(c.size(), cap);
c.set_infoz(infoz);
} }
c.infoz().RecordStorageChanged(c.size(), cap);
} }
// PolicyFunctions bundles together some information for a particular // PolicyFunctions bundles together some information for a particular
...@@ -1464,6 +1505,7 @@ ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common, ...@@ -1464,6 +1505,7 @@ ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
policy.slot_size * common.capacity()); policy.slot_size * common.capacity());
std::allocator<char> alloc; std::allocator<char> alloc;
common.infoz().Unregister();
Deallocate<BackingArrayAlignment(AlignOfSlot)>( Deallocate<BackingArrayAlignment(AlignOfSlot)>(
&alloc, common.backing_array_start(), &alloc, common.backing_array_start(),
common.alloc_size(policy.slot_size, AlignOfSlot)); common.alloc_size(policy.slot_size, AlignOfSlot));
...@@ -1894,11 +1936,10 @@ class raw_hash_set { ...@@ -1894,11 +1936,10 @@ class raw_hash_set {
// Unpoison before returning the memory to the allocator. // Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap); SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
infoz().Unregister();
Deallocate<BackingArrayAlignment(alignof(slot_type))>( Deallocate<BackingArrayAlignment(alignof(slot_type))>(
&alloc_ref(), common().backing_array_start(), &alloc_ref(), common().backing_array_start(),
AllocSize(cap, sizeof(slot_type), alignof(slot_type))); common().alloc_size(sizeof(slot_type), alignof(slot_type)));
infoz().Unregister();
} }
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
...@@ -2518,6 +2559,7 @@ class raw_hash_set { ...@@ -2518,6 +2559,7 @@ class raw_hash_set {
assert(IsValidCapacity(new_capacity)); assert(IsValidCapacity(new_capacity));
auto* old_ctrl = control(); auto* old_ctrl = control();
auto* old_slots = slot_array(); auto* old_slots = slot_array();
const bool had_infoz = common().has_infoz();
const size_t old_capacity = common().capacity(); const size_t old_capacity = common().capacity();
common().set_capacity(new_capacity); common().set_capacity(new_capacity);
initialize_slots(); initialize_slots();
...@@ -2539,8 +2581,9 @@ class raw_hash_set { ...@@ -2539,8 +2581,9 @@ class raw_hash_set {
SanitizerUnpoisonMemoryRegion(old_slots, SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity); sizeof(slot_type) * old_capacity);
Deallocate<BackingArrayAlignment(alignof(slot_type))>( Deallocate<BackingArrayAlignment(alignof(slot_type))>(
&alloc_ref(), old_ctrl - ControlOffset(), &alloc_ref(), old_ctrl - ControlOffset(had_infoz),
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type),
had_infoz));
} }
infoz().RecordRehash(total_probe_length); infoz().RecordRehash(total_probe_length);
} }
...@@ -2686,7 +2729,7 @@ class raw_hash_set { ...@@ -2686,7 +2729,7 @@ class raw_hash_set {
rehash_and_grow_if_necessary(); rehash_and_grow_if_necessary();
target = find_first_non_full(common(), hash); target = find_first_non_full(common(), hash);
} }
common().set_size(common().size() + 1); common().increment_size();
set_growth_left(growth_left() - IsEmpty(control()[target.offset])); set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
common().maybe_increment_generation_on_insert(); common().maybe_increment_generation_on_insert();
...@@ -2751,7 +2794,7 @@ class raw_hash_set { ...@@ -2751,7 +2794,7 @@ class raw_hash_set {
slot_type* slot_array() const { slot_type* slot_array() const {
return static_cast<slot_type*>(common().slot_array()); return static_cast<slot_type*>(common().slot_array());
} }
HashtablezInfoHandle& infoz() { return common().infoz(); } HashtablezInfoHandle infoz() { return common().infoz(); }
hasher& hash_ref() { return settings_.template get<1>(); } hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); } const hasher& hash_ref() const { return settings_.template get<1>(); }
...@@ -2782,6 +2825,7 @@ class raw_hash_set { ...@@ -2782,6 +2825,7 @@ class raw_hash_set {
SanitizerUnpoisonMemoryRegion(common.slot_array(), SanitizerUnpoisonMemoryRegion(common.slot_array(),
sizeof(slot_type) * common.capacity()); sizeof(slot_type) * common.capacity());
common.infoz().Unregister();
Deallocate<BackingArrayAlignment(alignof(slot_type))>( Deallocate<BackingArrayAlignment(alignof(slot_type))>(
&set->alloc_ref(), common.backing_array_start(), &set->alloc_ref(), common.backing_array_start(),
common.alloc_size(sizeof(slot_type), alignof(slot_type))); common.alloc_size(sizeof(slot_type), alignof(slot_type)));
...@@ -2855,7 +2899,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> { ...@@ -2855,7 +2899,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) { static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity(); size_t capacity = c.capacity();
if (capacity == 0) return 0; if (capacity == 0) return 0;
size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr)); size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) { if (per_slot != ~size_t{}) {
...@@ -2874,8 +2918,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> { ...@@ -2874,8 +2918,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t LowerBoundAllocatedByteSize(size_t size) { static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = GrowthToLowerboundCapacity(size); size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0; if (capacity == 0) return 0;
size_t m = size_t m = AllocSize(NormalizeCapacity(capacity), sizeof(Slot),
AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); alignof(Slot), /*has_infoz=*/false);
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr)); size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) { if (per_slot != ~size_t{}) {
m += per_slot * size; m += per_slot * size;
......
...@@ -524,13 +524,6 @@ TEST(Table, EmptyFunctorOptimization) { ...@@ -524,13 +524,6 @@ TEST(Table, EmptyFunctorOptimization) {
static_assert(std::is_empty<std::allocator<int>>::value, ""); static_assert(std::is_empty<std::allocator<int>>::value, "");
struct MockTable { struct MockTable {
void* infoz;
void* ctrl;
void* slots;
size_t size;
size_t capacity;
};
struct MockTableInfozDisabled {
void* ctrl; void* ctrl;
void* slots; void* slots;
size_t size; size_t size;
...@@ -555,9 +548,7 @@ TEST(Table, EmptyFunctorOptimization) { ...@@ -555,9 +548,7 @@ TEST(Table, EmptyFunctorOptimization) {
#pragma clang diagnostic push #pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code" #pragma clang diagnostic ignored "-Wunreachable-code"
#endif #endif
constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>() constexpr size_t mock_size = sizeof(MockTable);
? sizeof(MockTableInfozDisabled)
: sizeof(MockTable);
constexpr size_t generation_size = constexpr size_t generation_size =
SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0; SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
#if defined(__clang__) #if defined(__clang__)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment