Commit a1ec5d62 by Evan Brown Committed by Copybara-Service

In sanitizer mode, add generations to swisstable iterators and backing arrays so that we can detect invalid iterator use.

In sanitizer mode, add generations to swisstable iterators and backing arrays so that we can detect invalid iterator use.

PiperOrigin-RevId: 496455788
Change-Id: I83df92828098a3ef1181b4e454f3ac5d3ac7a2f2
parent dbc61b49
...@@ -619,6 +619,7 @@ cc_library( ...@@ -619,6 +619,7 @@ cc_library(
"//absl/base:core_headers", "//absl/base:core_headers",
"//absl/base:endian", "//absl/base:endian",
"//absl/base:prefetch", "//absl/base:prefetch",
"//absl/base:raw_logging_internal",
"//absl/memory", "//absl/memory",
"//absl/meta:type_traits", "//absl/meta:type_traits",
"//absl/numeric:bits", "//absl/numeric:bits",
......
...@@ -711,6 +711,7 @@ absl_cc_library( ...@@ -711,6 +711,7 @@ absl_cc_library(
absl::meta absl::meta
absl::optional absl::optional
absl::prefetch absl::prefetch
absl::raw_logging_internal
absl::utility absl::utility
absl::hashtablez_sampler absl::hashtablez_sampler
PUBLIC PUBLIC
......
...@@ -26,11 +26,14 @@ namespace container_internal { ...@@ -26,11 +26,14 @@ namespace container_internal {
// A single block of empty control bytes for tables without any slots allocated. // A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find(). // This enables removing a branch in the hot path of find().
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = { // We have 17 bytes because there may be a generation counter. Any constant is
// fine for the generation counter.
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[17] = {
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
static_cast<ctrl_t>(0)};
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth; constexpr size_t Group::kWidth;
...@@ -190,24 +193,24 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) { ...@@ -190,24 +193,24 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
slot_size); slot_size);
c.growth_left_ += (was_never_full ? 1 : 0); c.growth_left() += (was_never_full ? 1 : 0);
c.infoz().RecordErase(); c.infoz().RecordErase();
} }
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) { bool reuse) {
c.size_ = 0;
if (reuse) { if (reuse) {
c.size_ = 0;
ResetCtrl(c, policy.slot_size); ResetCtrl(c, policy.slot_size);
c.infoz().RecordStorageChanged(0, c.capacity_); c.infoz().RecordStorageChanged(0, c.capacity_);
} else { } else {
void* set = &c; void* set = &c;
(*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_); (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
c.control_ = EmptyGroup(); c.control_ = EmptyGroup();
c.set_generation_ptr(EmptyGeneration());
c.slots_ = nullptr; c.slots_ = nullptr;
c.size_ = 0;
c.capacity_ = 0; c.capacity_ = 0;
c.growth_left_ = 0; c.growth_left() = 0;
c.infoz().RecordClearedReservation(); c.infoz().RecordClearedReservation();
assert(c.size_ == 0); assert(c.size_ == 0);
c.infoz().RecordStorageChanged(0, 0); c.infoz().RecordStorageChanged(0, 0);
......
...@@ -476,27 +476,37 @@ TEST(Table, EmptyFunctorOptimization) { ...@@ -476,27 +476,37 @@ TEST(Table, EmptyFunctorOptimization) {
size_t dummy; size_t dummy;
}; };
if (std::is_empty<HashtablezInfoHandle>::value) { struct GenerationData {
EXPECT_EQ(sizeof(MockTableInfozDisabled), size_t reserved_growth;
sizeof(raw_hash_set<StringPolicy, StatelessHash, GenerationType* generation;
std::equal_to<absl::string_view>, };
std::allocator<int>>));
EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash),
sizeof(raw_hash_set<StringPolicy, StatefulHash,
std::equal_to<absl::string_view>,
std::allocator<int>>));
} else {
EXPECT_EQ(sizeof(MockTable),
sizeof(raw_hash_set<StringPolicy, StatelessHash,
std::equal_to<absl::string_view>,
std::allocator<int>>));
EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash), // Ignore unreachable-code warning. Compiler thinks one branch of each ternary
sizeof(raw_hash_set<StringPolicy, StatefulHash, // conditional is unreachable.
std::equal_to<absl::string_view>, #if defined(__clang__)
std::allocator<int>>)); #pragma clang diagnostic push
} #pragma clang diagnostic ignored "-Wunreachable-code"
#endif
constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>()
? sizeof(MockTableInfozDisabled)
: sizeof(MockTable);
constexpr size_t generation_size =
SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
EXPECT_EQ(
mock_size + generation_size,
sizeof(
raw_hash_set<StringPolicy, StatelessHash,
std::equal_to<absl::string_view>, std::allocator<int>>));
EXPECT_EQ(
mock_size + sizeof(StatefulHash) + generation_size,
sizeof(
raw_hash_set<StringPolicy, StatefulHash,
std::equal_to<absl::string_view>, std::allocator<int>>));
} }
TEST(Table, Empty) { TEST(Table, Empty) {
...@@ -2236,6 +2246,52 @@ TEST(Table, AlignOne) { ...@@ -2236,6 +2246,52 @@ TEST(Table, AlignOne) {
} }
} }
// Invalid iterator use can trigger heap-use-after-free in asan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
// The three alternatives are OR-ed into one death-test regexp so the same
// constant works regardless of which sanitizer (if any) is active.
constexpr const char* kInvalidIteratorDeathMessage =
"heap-use-after-free|use-of-uninitialized-value|invalidated iterator";

// NOTE(review): "Lexan" appears to denote clang running in MSVC-compatible
// mode (clang-cl), detected here as __clang__ + _MSC_VER -- confirm against
// the project's toolchain naming. Per the skip messages in the tests below,
// its death-test regexp matching does not support '|' alternation, so tests
// using kInvalidIteratorDeathMessage must be skipped under it.
#if defined(__clang__) && defined(_MSC_VER)
constexpr bool kLexan = true;
#else
constexpr bool kLexan = false;
#endif
// Verifies that dereferencing an iterator after a subsequent insertion is
// diagnosed (generation mismatch, or a sanitizer fault on the freed backing
// array). Requires swisstable generations; skipped where the death-test
// regexp (which uses '|') is unsupported.
TEST(Table, InvalidIteratorUse) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";

  IntTable t;
  // Seed the table with one element so that t.begin() below always refers to
  // a real slot and `it` is never an end iterator.
  t.insert(-1);
  int round = 0;
  while (round < 10) {
    auto it = t.begin();
    // Inserting after capturing `it` must invalidate it; dereferencing is
    // expected to die on every one of these iterations.
    t.insert(round);
    EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
    ++round;
  }
}
// Verifies the interaction of reserve() with iterator invalidation:
// insertions covered by reserved growth keep iterators valid, while the
// first insertion beyond the reservation invalidates them. Requires
// swisstable generations; skipped where the '|' death-test regexp is
// unsupported.
TEST(Table, InvalidIteratorUseWithReserve) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";

  IntTable t;
  t.reserve(10);
  t.insert(0);
  auto it = t.begin();
  // These nine insertions stay within the reservation, so no rehash may
  // occur and `it` must remain dereferenceable throughout.
  for (int extra = 1; extra < 10; ++extra) {
    t.insert(extra);
    EXPECT_EQ(*it, 0);
  }
  // Erasing shrinks size but does not replenish reserved growth, so the
  // insertion below is unreserved growth and may rehash -- after which
  // dereferencing `it` must be diagnosed.
  t.erase(0);
  t.insert(10);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
}
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment