Commit f61aac8c by Evan Brown Committed by Copybara-Service

Add validation against use of moved-from hash tables.

We enable this validation when sanitizers are enabled rather than in debug mode because debug mode can be enabled or disabled separately in different translation units, which can cause bugs for this type of validation. For example, a hash table that is moved from in a translation unit with debug mode enabled could then be destroyed in a translation unit with debug mode disabled, so the destructor would not recognize the moved-from sentinel state.

PiperOrigin-RevId: 668492783
Change-Id: Ifab6894de0890aa0a7d8ea338b53b6759b097cb1
parent 0f938287
...@@ -545,6 +545,7 @@ enum InvalidCapacity : size_t { ...@@ -545,6 +545,7 @@ enum InvalidCapacity : size_t {
kAboveMaxValidCapacity = ~size_t{} - 100, kAboveMaxValidCapacity = ~size_t{} - 100,
kReentrance, kReentrance,
kDestroyed, kDestroyed,
kMovedFrom,
}; };
// Returns a pointer to a control byte group that can be used by empty tables. // Returns a pointer to a control byte group that can be used by empty tables.
...@@ -2402,6 +2403,10 @@ class raw_hash_set { ...@@ -2402,6 +2403,10 @@ class raw_hash_set {
alignof(slot_type) <= alignof(HeapOrSoo); alignof(slot_type) <= alignof(HeapOrSoo);
} }
constexpr static size_t DefaultCapacity() {
return SooEnabled() ? SooCapacity() : 0;
}
// Whether `size` fits in the SOO capacity of this table. // Whether `size` fits in the SOO capacity of this table.
bool fits_in_soo(size_t size) const { bool fits_in_soo(size_t size) const {
return SooEnabled() && size <= SooCapacity(); return SooEnabled() && size <= SooCapacity();
...@@ -2639,7 +2644,7 @@ class raw_hash_set { ...@@ -2639,7 +2644,7 @@ class raw_hash_set {
const allocator_type& alloc = allocator_type()) const allocator_type& alloc = allocator_type())
: settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq, : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
alloc) { alloc) {
if (bucket_count > (SooEnabled() ? SooCapacity() : 0)) { if (bucket_count > DefaultCapacity()) {
resize(NormalizeCapacity(bucket_count)); resize(NormalizeCapacity(bucket_count));
} }
} }
...@@ -2822,7 +2827,7 @@ class raw_hash_set { ...@@ -2822,7 +2827,7 @@ class raw_hash_set {
transfer(soo_slot(), that.soo_slot()); transfer(soo_slot(), that.soo_slot());
} }
that.common() = CommonFields::CreateDefault<SooEnabled()>(); that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move(); annotate_for_bug_detection_on_move(that);
} }
raw_hash_set(raw_hash_set&& that, const allocator_type& a) raw_hash_set(raw_hash_set&& that, const allocator_type& a)
...@@ -2830,7 +2835,7 @@ class raw_hash_set { ...@@ -2830,7 +2835,7 @@ class raw_hash_set {
that.eq_ref(), a) { that.eq_ref(), a) {
if (a == that.alloc_ref()) { if (a == that.alloc_ref()) {
swap_common(that); swap_common(that);
maybe_increment_generation_or_rehash_on_move(); annotate_for_bug_detection_on_move(that);
} else { } else {
move_elements_allocs_unequal(std::move(that)); move_elements_allocs_unequal(std::move(that));
} }
...@@ -2896,15 +2901,19 @@ class raw_hash_set { ...@@ -2896,15 +2901,19 @@ class raw_hash_set {
size_t size() const { return common().size(); } size_t size() const { return common().size(); }
size_t capacity() const { size_t capacity() const {
const size_t cap = common().capacity(); const size_t cap = common().capacity();
// Compiler complains when using functions in assume so use local variables. // Compiler complains when using functions in ASSUME so use local variable.
ABSL_ATTRIBUTE_UNUSED static constexpr bool kEnabled = SooEnabled(); ABSL_ATTRIBUTE_UNUSED static constexpr size_t kDefaultCapacity =
ABSL_ATTRIBUTE_UNUSED static constexpr size_t kCapacity = SooCapacity(); DefaultCapacity();
ABSL_ASSUME(!kEnabled || cap >= kCapacity); ABSL_ASSUME(cap >= kDefaultCapacity);
return cap; return cap;
} }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); } size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() { ABSL_ATTRIBUTE_REINITIALIZES void clear() {
if (SwisstableGenerationsEnabled() &&
capacity() == InvalidCapacity::kMovedFrom) {
common().set_capacity(DefaultCapacity());
}
AssertNotDebugCapacity(); AssertNotDebugCapacity();
// Iterating over this container is O(bucket_count()). When bucket_count() // Iterating over this container is O(bucket_count()). When bucket_count()
// is much greater than size(), iteration becomes prohibitively expensive. // is much greater than size(), iteration becomes prohibitively expensive.
...@@ -3586,6 +3595,10 @@ class raw_hash_set { ...@@ -3586,6 +3595,10 @@ class raw_hash_set {
} }
inline void destructor_impl() { inline void destructor_impl() {
if (SwisstableGenerationsEnabled() &&
capacity() == InvalidCapacity::kMovedFrom) {
return;
}
if (capacity() == 0) return; if (capacity() == 0) return;
if (is_soo()) { if (is_soo()) {
if (!empty()) { if (!empty()) {
...@@ -3759,8 +3772,16 @@ class raw_hash_set { ...@@ -3759,8 +3772,16 @@ class raw_hash_set {
move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp)); move_common(that_is_full_soo, that.alloc_ref(), common(), std::move(tmp));
} }
void maybe_increment_generation_or_rehash_on_move() { void annotate_for_bug_detection_on_move(
if (!SwisstableGenerationsEnabled() || capacity() == 0 || is_soo()) { ABSL_ATTRIBUTE_UNUSED raw_hash_set& that) {
// We only enable moved-from validation when generations are enabled (rather
// than using NDEBUG) to avoid issues in which NDEBUG is enabled in some
// translation units but not in others.
if (SwisstableGenerationsEnabled() && this != &that) {
that.common().set_capacity(InvalidCapacity::kMovedFrom);
}
if (!SwisstableGenerationsEnabled() || capacity() == DefaultCapacity() ||
capacity() > kAboveMaxValidCapacity) {
return; return;
} }
common().increment_generation(); common().increment_generation();
...@@ -3782,7 +3803,7 @@ class raw_hash_set { ...@@ -3782,7 +3803,7 @@ class raw_hash_set {
CopyAlloc(alloc_ref(), that.alloc_ref(), CopyAlloc(alloc_ref(), that.alloc_ref(),
std::integral_constant<bool, propagate_alloc>()); std::integral_constant<bool, propagate_alloc>());
that.common() = CommonFields::CreateDefault<SooEnabled()>(); that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move(); annotate_for_bug_detection_on_move(that);
return *this; return *this;
} }
...@@ -3796,7 +3817,7 @@ class raw_hash_set { ...@@ -3796,7 +3817,7 @@ class raw_hash_set {
} }
if (!that.is_soo()) that.dealloc(); if (!that.is_soo()) that.dealloc();
that.common() = CommonFields::CreateDefault<SooEnabled()>(); that.common() = CommonFields::CreateDefault<SooEnabled()>();
maybe_increment_generation_or_rehash_on_move(); annotate_for_bug_detection_on_move(that);
return *this; return *this;
} }
...@@ -3875,27 +3896,30 @@ class raw_hash_set { ...@@ -3875,27 +3896,30 @@ class raw_hash_set {
// Asserts for correctness that we run on find/find_or_prepare_insert. // Asserts for correctness that we run on find/find_or_prepare_insert.
template <class K> template <class K>
void AssertOnFind(ABSL_ATTRIBUTE_UNUSED const K& key) { void AssertOnFind(ABSL_ATTRIBUTE_UNUSED const K& key) {
#ifdef NDEBUG
return;
#endif
AssertHashEqConsistent(key); AssertHashEqConsistent(key);
AssertNotDebugCapacity(); AssertNotDebugCapacity();
} }
// Asserts that the capacity is not a sentinel invalid value. // Asserts that the capacity is not a sentinel invalid value.
// TODO(b/296061262): also add asserts for moved-from state.
void AssertNotDebugCapacity() const { void AssertNotDebugCapacity() const {
assert(capacity() != InvalidCapacity::kReentrance && assert(capacity() != InvalidCapacity::kReentrance &&
"reentrant container access during element construction/destruction " "Reentrant container access during element construction/destruction "
"is not allowed."); "is not allowed.");
assert(capacity() != InvalidCapacity::kDestroyed && assert(capacity() != InvalidCapacity::kDestroyed &&
"use of destroyed hash table."); "Use of destroyed hash table.");
if (SwisstableGenerationsEnabled() &&
ABSL_PREDICT_FALSE(capacity() == InvalidCapacity::kMovedFrom)) {
ABSL_RAW_LOG(FATAL, "Use of moved-from hash table.");
}
} }
// Asserts that hash and equal functors provided by the user are consistent, // Asserts that hash and equal functors provided by the user are consistent,
// meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`. // meaning that `eq(k1, k2)` implies `hash(k1)==hash(k2)`.
template <class K> template <class K>
void AssertHashEqConsistent(const K& key) { void AssertHashEqConsistent(const K& key) {
#ifdef NDEBUG
return;
#endif
// If the hash/eq functors are known to be consistent, then skip validation. // If the hash/eq functors are known to be consistent, then skip validation.
if (std::is_same<hasher, absl::container_internal::StringHash>::value && if (std::is_same<hasher, absl::container_internal::StringHash>::value &&
std::is_same<key_equal, absl::container_internal::StringEq>::value) { std::is_same<key_equal, absl::container_internal::StringEq>::value) {
......
...@@ -3674,6 +3674,29 @@ TEST(Table, DestroyedCallsFail) { ...@@ -3674,6 +3674,29 @@ TEST(Table, DestroyedCallsFail) {
#endif #endif
} }
TEST(Table, MovedFromCallsFail) {
if (!SwisstableGenerationsEnabled()) {
GTEST_SKIP() << "Moved-from checks only enabled in sanitizer mode.";
return;
}
{
ABSL_ATTRIBUTE_UNUSED IntTable t1, t2;
t1.insert(1);
t2 = std::move(t1);
// NOLINTNEXTLINE(bugprone-use-after-move)
EXPECT_DEATH_IF_SUPPORTED(t1.contains(1), "");
}
{
ABSL_ATTRIBUTE_UNUSED IntTable t1;
t1.insert(1);
ABSL_ATTRIBUTE_UNUSED IntTable t2(std::move(t1));
// NOLINTNEXTLINE(bugprone-use-after-move)
EXPECT_DEATH_IF_SUPPORTED(t1.contains(1), "");
t1.clear(); // Clearing a moved-from table is allowed.
}
}
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment