Commit 9336be04 by Abseil Team, committed by Andy Getzendanner

Export of internal Abseil changes

--
e7f53dfbf809812e84770217777f81b6308a3084 by Abseil Team <absl-team@google.com>:

Add a parameter pack to absl profiles to allow profiles to separate
dynamic data from static data that is available at constructor time.

Background: `inline_element_size` is effectively constant, but there
was a data race between its initialization and its access. We had fixed that
race by making `inline_element_size` atomic. This CL changes
`inline_element_size` back to a non-atomic integer, and provides a way for all
profiles to supply Register()-time values.
PiperOrigin-RevId: 413960559
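
For context, the shape of the change is sketched below. This is a minimal,
hypothetical illustration (class `Recorder`, struct `Info`, and their members
are made up for the example; the real code is `SampleRecorder<T>` in
absl/profiling/internal/sample_recorder.h, shown in the hunks further down):

```cpp
#include <cstddef>
#include <utility>

// Hypothetical, simplified recorder illustrating the pattern.
template <typename T>
class Recorder {
 public:
  // The parameter pack carries Register()-time values into
  // PrepareForSampling(), so effectively-constant fields are written before
  // the sample becomes visible to readers and need not be atomic.
  template <typename... Targs>
  T* Register(Targs&&... args) {
    T* sample = new T();
    sample->PrepareForSampling(std::forward<Targs>(args)...);
    // Publishing the sample to a global list is elided in this sketch.
    return sample;
  }
};

struct Info {
  std::size_t inline_element_size = 0;  // Plain field: set once, then read-only.
  void PrepareForSampling(std::size_t size) { inline_element_size = size; }
};

int main() {
  Recorder<Info> recorder;
  Info* info = recorder.Register(sizeof(long));  // e.g. the table's slot size
  delete info;
}
```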

--
70234c5943f8e37e17c1d9c54d8ed61d39880abf by Chris Kennelly <ckennelly@google.com>:

Document that absl::FunctionRef does not allocate.

PiperOrigin-RevId: 413946831
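
A small illustration of the documented guarantee (the function names here are
made up for the example): `absl::FunctionRef` merely references its callable,
so passing even a large-capture lambda involves no heap allocation by the
wrapper itself.

```cpp
#include <cstdint>

#include "absl/functional/function_ref.h"

// Taking absl::FunctionRef by value: the wrapper never allocates, regardless
// of the callable's capture size. The callable must outlive the call, since
// FunctionRef only references it.
int64_t Accumulate(int n, absl::FunctionRef<int64_t(int)> f) {
  int64_t sum = 0;
  for (int i = 0; i < n; ++i) sum += f(i);
  return sum;
}

int64_t Example() {
  const int64_t scale = 3;
  // The lambda's capture lives on the stack; FunctionRef just points at it.
  return Accumulate(10, [&scale](int i) { return scale * i; });
}
```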

--
3308ae571412c4be3cc32d088c6edac98ff2d1ed by Samuel Benzaquen <sbenza@google.com>:

Internal change

PiperOrigin-RevId: 413933619

--
1617093a730d055edcf7bc04fdd6509783f5f75d by Martijn Vels <mvels@google.com>:

Internal Change

PiperOrigin-RevId: 413778735

--
03ad683f059c806a6c8b04f5b79b2662c3df8c73 by Evan Brown <ezb@google.com>:

Unify the btree erase_if definitions and optimize them so that we only rebalance once per leaf node.

PiperOrigin-RevId: 413757280
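
The user-facing API is unchanged; a call like the following sketch still works
and now routes through the single shared implementation
(`container_internal::btree_access::erase_if`, shown in the hunks below):

```cpp
#include <iostream>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // erase_if removes every element matching the predicate; with this change,
  // consecutive erases within one leaf node trigger only one rebalance.
  auto erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
  std::cout << erased << "\n";  // 3
}
```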

--
5ba402f70801938178e486617063f01c7862525d by Martijn Vels <mvels@google.com>:

Clean up cord sampling internals

PiperOrigin-RevId: 413755011

--
522da8f9d3e0f11630d89fb41952004742bc335a by Evan Brown <ezb@google.com>:

Add b-tree benchmark for erase_if.

Since this benchmark doesn't work for std:: containers before C++20, disable it for them.

PiperOrigin-RevId: 413740844
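
For background, uniform container erasure (`std::erase_if`) was only added in
C++20, so benchmarking the `std::` containers needs a guard along these lines
(a sketch using the standard feature-test macro, not the benchmark's exact
code; `EraseEvens` is a name invented for the example):

```cpp
#include <cstddef>
#include <set>

// std::erase_if for std::set is a C++20 library addition; the feature-test
// macro __cpp_lib_erase_if (defined by <set> in conforming C++20 libraries)
// lets pre-C++20 builds skip this code path entirely.
#if defined(__cpp_lib_erase_if)
std::size_t EraseEvens(std::set<int>& s) {
  return std::erase_if(s, [](int v) { return v % 2 == 0; });
}
#endif
```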

--
a690ea42de8ed4a761d00235d8b2fb7548ba9732 by Andy Getzendanner <durandal@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 413735737
GitOrigin-RevId: e7f53dfbf809812e84770217777f81b6308a3084
Change-Id: I4f9f9039ba92831bc48971964aa063244c9fed72
parent e11e039e
@@ -241,6 +241,7 @@ cc_library(
         ":hash_function_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -310,6 +311,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -342,6 +344,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
...
@@ -274,6 +274,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::raw_hash_map
     absl::algorithm_container
@@ -347,6 +348,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_map
@@ -381,6 +383,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_set
...
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
   BM_LookupImpl<T>(state, true);
 }
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
 template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@ void BM_Delete(benchmark::State& state) {
   }
 }
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
 template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@ void BM_DeleteRange(benchmark::State& state) {
   }
 }
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different fractions
+// of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+  uint64_t i = 0;
+  template <typename T>
+  bool operator()(const T&) {
+    return ++i % 2;
+  }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+  // Removes half of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 1) / 2;
+  EraseIfPred pred;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    {
+      T container(values.begin(), values.end());
+      state.ResumeTiming();
+      erase_if(container, pred);
+      benchmark::DoNotOptimize(container);
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
 // Benchmark steady-state insert (into first half of range) and remove (from
 // second half of range), treating the container approximately like a queue with
 // log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@ BTREE_TYPES(Time);
   void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
   BENCHMARK(BM_##type##_##func)
-#define MY_BENCHMARK3(type) \
+#define MY_BENCHMARK3_STL(type) \
   MY_BENCHMARK4(type, Insert); \
   MY_BENCHMARK4(type, InsertSorted); \
   MY_BENCHMARK4(type, InsertSmall); \
   MY_BENCHMARK4(type, Lookup); \
   MY_BENCHMARK4(type, FullLookup); \
-  MY_BENCHMARK4(type, Delete); \
-  MY_BENCHMARK4(type, DeleteRange); \
+  MY_BENCHMARK4(type, Erase); \
+  MY_BENCHMARK4(type, EraseRange); \
   MY_BENCHMARK4(type, QueueAddRem); \
   MY_BENCHMARK4(type, MixedAddRem); \
   MY_BENCHMARK4(type, Fifo); \
@@ -492,9 +526,13 @@ BTREE_TYPES(Time);
   MY_BENCHMARK4(type, InsertRangeRandom); \
   MY_BENCHMARK4(type, InsertRangeSorted)
+#define MY_BENCHMARK3(type) \
+  MY_BENCHMARK4(type, EraseIf); \
+  MY_BENCHMARK3_STL(type)
+
 #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
-  MY_BENCHMARK3(stl_##type); \
-  MY_BENCHMARK3(stl_unordered_##type); \
+  MY_BENCHMARK3_STL(stl_##type); \
+  MY_BENCHMARK3_STL(stl_unordered_##type); \
   MY_BENCHMARK3(btree_256_##type)
 #define MY_BENCHMARK2(type) \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
       btree_set<BigTypePtr<SIZE>>; \
   using btree_256_map_size##SIZE##copies##SIZE##ptr = \
       btree_map<int, BigTypePtr<SIZE>>; \
-  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
...
@@ -482,15 +482,7 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_map<K, V, C, A>::size_type erase_if(
     btree_map<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 // absl::btree_multimap
@@ -817,15 +809,7 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_multimap<K, V, C, A>::size_type erase_if(
     btree_multimap<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 namespace container_internal {
...
@@ -402,15 +402,7 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
                                                 Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 // absl::btree_multiset<>
@@ -732,15 +724,7 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_multiset<K, C, A>::size_type erase_if(
     btree_multiset<K, C, A> &set, Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 namespace container_internal {
...
@@ -2490,6 +2490,20 @@ TEST(Btree, EraseIf) {
     EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, ElementsAre(1, 3, 5));
   }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
 }
 TEST(Btree, InsertOrAssign) {
...
@@ -36,6 +36,7 @@
 #include <utility>
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
...
@@ -830,6 +830,7 @@ class btree_node {
   template <typename N, typename R, typename P>
   friend struct btree_iterator;
   friend class BtreeNodePeer;
+  friend struct btree_access;
 };
 template <typename Node, typename Reference, typename Pointer>
@@ -964,6 +965,7 @@ struct btree_iterator {
   friend class btree_multiset_container;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
+  friend struct btree_access;
   const key_type &key() const { return node->key(position); }
   slot_type *slot() { return node->slot(position); }
@@ -1336,6 +1338,8 @@ class btree {
   allocator_type get_allocator() const { return allocator(); }
  private:
+  friend struct btree_access;
+
   // Internal accessor routines.
   node_type *root() { return root_.template get<2>(); }
   const node_type *root() const { return root_.template get<2>(); }
@@ -2530,6 +2534,48 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
   return count;
 }
+struct btree_access {
+  template <typename BtreeContainer, typename Pred>
+  static auto erase_if(BtreeContainer &container, Pred pred)
+      -> typename BtreeContainer::size_type {
+    const auto initial_size = container.size();
+    auto &tree = container.tree_;
+    auto *alloc = tree.mutable_allocator();
+    for (auto it = container.begin(); it != container.end();) {
+      if (!pred(*it)) {
+        ++it;
+        continue;
+      }
+      auto *node = it.node;
+      if (!node->leaf()) {
+        // Handle internal nodes normally.
+        it = container.erase(it);
+        continue;
+      }
+      // If this is a leaf node, then we do all the erases from this node
+      // at once before doing rebalancing.
+
+      // The current position to transfer slots to.
+      int to_pos = it.position;
+      node->value_destroy(it.position, alloc);
+      while (++it.position < node->finish()) {
+        if (pred(*it)) {
+          node->value_destroy(it.position, alloc);
+        } else {
+          node->transfer(node->slot(to_pos++), node->slot(it.position),
+                         alloc);
+        }
+      }
+      const int num_deleted = node->finish() - to_pos;
+      tree.size_ -= num_deleted;
+      node->set_finish(to_pos);
+      it.position = to_pos;
+      it = tree.rebalance_after_delete(it);
+    }
+    return initial_size - container.size();
+  }
+};
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
...
@@ -228,6 +228,7 @@ class btree_container {
   }
  protected:
+  friend struct btree_access;
   Tree tree_;
 };
...
@@ -61,13 +61,10 @@ HashtablezSampler& GlobalHashtablezSampler() {
   return *sampler;
 }
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -85,6 +82,7 @@ void HashtablezInfo::PrepareForSampling() {
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
+  inline_element_size = inline_element_size_value;
 }
 static bool ShouldForceSampling() {
@@ -110,9 +108,8 @@ static bool ShouldForceSampling() {
 HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
-    HashtablezInfo* result = GlobalHashtablezSampler().Register();
-    result->inline_element_size.store(inline_element_size,
-                                      std::memory_order_relaxed);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(inline_element_size);
     return result;
   }
@@ -138,10 +135,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
     return SampleSlow(next_sample, inline_element_size);
   }
-  HashtablezInfo* result = GlobalHashtablezSampler().Register();
-  result->inline_element_size.store(inline_element_size,
-                                    std::memory_order_relaxed);
-  return result;
+  return GlobalHashtablezSampler().Register(inline_element_size);
 #endif
 }
...
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling(size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -82,18 +83,6 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_xor;
   std::atomic<size_t> max_reserve;
-  // One could imagine that inline_element_size could be non-atomic, since it
-  // *almost* follows the rules for the fields that are set by
-  // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
-  // which
-  // A: Thread 1: Register() returns, unlocking init_mu.
-  // B: Thread 2: Iterate() is called, locking init_mu.
-  // C: Thread 1: inlined_element_size is stored.
-  // D: Thread 2: inlined_element_size is accessed (a race).
-  // A simple solution is to make inline_element_size atomic so that we treat it
-  // at as we do the other atomic fields.
-  std::atomic<size_t> inline_element_size;
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
   // These are guarded by init_mu, but that is not externalized to clients,
@@ -103,6 +92,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
 };
 inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
...
@@ -70,7 +70,8 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
 }
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
-  auto* info = s->Register();
+  const size_t test_element_size = 17;
+  auto* info = s->Register(test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -81,9 +82,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -108,7 +108,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,14 +119,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 19;
+  info.PrepareForSampling(test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +139,8 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 23;
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -161,8 +163,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,15 +171,14 @@ TEST(HashtablezInfoTest, RecordErase) {
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 TEST(HashtablezInfoTest, RecordRehash) {
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,13 +197,14 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 33;
+  info.PrepareForSampling(test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
@@ -266,7 +267,8 @@ TEST(HashtablezSamplerTest, Sample) {
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
-  HashtablezInfoHandle h(sampler.Register());
+  const size_t test_element_size = 39;
+  HashtablezInfoHandle h(sampler.Register(test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
@@ -338,18 +340,19 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    const size_t elt_size = 10 + i % 2;
+    pool.Schedule([&sampler, &stop, elt_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
       std::vector<HashtablezInfo*> infoz;
      while (!stop.HasBeenNotified()) {
        if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(elt_size));
        }
        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
          case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(elt_size));
            break;
          }
          case 1: {
...
@@ -2075,8 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
                                   std::memory_order_relaxed)]++;
       reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
-    EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-              sizeof(int64_t));
+    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
     ++end_size;
   });
...
@@ -41,6 +41,7 @@
 #include <utility>
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
...
@@ -38,6 +38,7 @@
 #include <type_traits>
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
...
@@ -51,8 +51,7 @@ void TestInlineElementSize(
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.insert(&info).second) {
-      EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-                expected_element_size);
+      EXPECT_EQ(info.inline_element_size, expected_element_size);
       ++new_count;
     }
   });
...
@@ -69,7 +69,8 @@ class FunctionRef;
 // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
 // a compatible signature. Generally, an `absl::FunctionRef` should only be used
 // as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
+// although the wrapped invokable may.
 //
 // Example:
 //
...
@@ -14,6 +14,7 @@
 #include "absl/functional/function_ref.h"
+#include <functional>
 #include <memory>
 #include "gmock/gmock.h"
...
@@ -59,7 +59,8 @@ class SampleRecorder {
   ~SampleRecorder();
   // Registers for sampling. Returns an opaque registration info.
-  T* Register();
+  template <typename... Targs>
+  T* Register(Targs&&... args);
   // Unregisters the sample.
   void Unregister(T* sample);
@@ -81,7 +82,8 @@ class SampleRecorder {
  private:
   void PushNew(T* sample);
   void PushDead(T* sample);
-  T* PopDead();
+  template <typename... Targs>
+  T* PopDead(Targs... args);
   std::atomic<size_t> dropped_samples_;
   std::atomic<size_t> size_estimate_;
@@ -163,7 +165,8 @@ void SampleRecorder<T>::PushDead(T* sample) {
 }
 template <typename T>
-T* SampleRecorder<T>::PopDead() {
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
   absl::MutexLock graveyard_lock(&graveyard_.init_mu);
   // The list is circular, so eventually it collapses down to
@@ -175,12 +178,13 @@ T* SampleRecorder<T>::PopDead() {
   absl::MutexLock sample_lock(&sample->init_mu);
   graveyard_.dead = sample->dead;
   sample->dead = nullptr;
-  sample->PrepareForSampling();
+  sample->PrepareForSampling(std::forward<Targs>(args)...);
   return sample;
 }
 template <typename T>
-T* SampleRecorder<T>::Register() {
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
   int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
   if (size > max_samples_.load(std::memory_order_relaxed)) {
     size_estimate_.fetch_sub(1, std::memory_order_relaxed);
@@ -188,10 +192,14 @@ T* SampleRecorder<T>::Register() {
     return nullptr;
   }
-  T* sample = PopDead();
+  T* sample = PopDead(args...);
   if (sample == nullptr) {
     // Resurrection failed. Hire a new warlock.
     sample = new T();
+    {
+      absl::MutexLock sample_lock(&sample->init_mu);
+      sample->PrepareForSampling(std::forward<Targs>(args)...);
+    }
     PushNew(sample);
   }
...
@@ -468,46 +468,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
   return true;
 }
-template <bool has_length>
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
-                                      size_t length) {
-  auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
-  CordRep* root = tree();
-  size_t sz = root ? root->length : inline_size();
-  if (root == nullptr) {
-    size_t available = kMaxInline - sz;
-    if (available >= (has_length ? length : 1)) {
-      *region = data_.as_chars() + sz;
-      *size = has_length ? length : available;
-      set_inline_size(has_length ? sz + length : kMaxInline);
-      return;
-    }
-  }
-  size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
-  CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
-  CordRep* rep = root ? cord_internal::RemoveCrcNode(root)
-                      : MakeFlatWithExtraCapacity(extra);
-  if (PrepareAppendRegion(rep, region, size, length)) {
-    CommitTree(root, rep, scope, method);
-    return;
-  }
-  // Allocate new node.
-  CordRepFlat* new_node = CordRepFlat::New(extra);
-  new_node->length = std::min(new_node->Capacity(), length);
-  *region = new_node->Data();
-  *size = new_node->length;
-  if (btree_enabled()) {
-    rep = CordRepBtree::Append(ForceBtree(rep), new_node);
-  } else {
-    rep = Concat(rep, new_node);
-  }
-  CommitTree(root, rep, scope, method);
-}
 // Computes the memory size of the provided edge which must be a valid data edge
 // for a btree, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL node.
 static bool RepMemoryUsageDataEdge(const CordRep* rep,
...
@@ -39,8 +39,8 @@ class CordzUpdateTracker {
   // Tracked update methods.
   enum MethodIdentifier {
     kUnknown,
-    kAppendBuffer,
     kAppendCord,
+    kAppendCordBuffer,
     kAppendExternalMemory,
     kAppendString,
     kAssignCord,
@@ -50,13 +50,14 @@ class CordzUpdateTracker {
     kConstructorString,
     kCordReader,
     kFlatten,
+    kGetAppendBuffer,
     kGetAppendRegion,
     kMakeCordFromExternal,
     kMoveAppendCord,
     kMoveAssignCord,
     kMovePrependCord,
-    kPrependBuffer,
     kPrependCord,
+    kPrependCordBuffer,
     kPrependString,
     kRemovePrefix,
     kRemoveSuffix,
...
@@ -37,8 +37,8 @@ using Methods = std::array<Method, Method::kNumMethods>;
 // Returns an array of all methods defined in `MethodIdentifier`
 Methods AllMethods() {
   return Methods{Method::kUnknown,
-                 Method::kAppendBuffer,
                 Method::kAppendCord,
+                 Method::kAppendCordBuffer,
                 Method::kAppendExternalMemory,
                 Method::kAppendString,
                 Method::kAssignCord,
@@ -48,13 +48,14 @@ Methods AllMethods() {
                 Method::kConstructorString,
                 Method::kCordReader,
                 Method::kFlatten,
+                 Method::kGetAppendBuffer,
                 Method::kGetAppendRegion,
                 Method::kMakeCordFromExternal,
                 Method::kMoveAppendCord,
                 Method::kMoveAssignCord,
                 Method::kMovePrependCord,
-                 Method::kPrependBuffer,
                 Method::kPrependCord,
+                 Method::kPrependCordBuffer,
                 Method::kPrependString,
                 Method::kRemovePrefix,
                 Method::kRemoveSuffix,
...
@@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
   return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
 }
 CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
-  return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+  const int yi = static_cast<int>((y + (m > 2)) % 400);
+  return yi < 0 ? yi + 400 : yi;
 }
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
   return 36524 + (yi == 0 || yi > 300);
 }
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
   return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
 }
 CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
     }
   }
   if (d > 365) {
+    int yi = year_index(ey, m);  // Index into Gregorian 400 year cycle.
     for (;;) {
-      int n = days_per_century(ey, m);
+      int n = days_per_century(yi);
       if (d <= n) break;
      d -= n;
      ey += 100;
+      yi += 100;
+      if (yi >= 400) yi -= 400;
    }
    for (;;) {
-      int n = days_per_4years(ey, m);
+      int n = days_per_4years(yi);
      if (d <= n) break;
      d -= n;
      ey += 4;
+      yi += 4;
+      if (yi >= 400) yi -= 400;
    }
    for (;;) {
      int n = days_per_year(ey, m);
...
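
To see why the incremental `yi` bookkeeping above is valid, here is a
standalone restatement (an illustration only, with simplified parameter types;
`YearIndex` is a made-up name for this sketch): `year_index` maps a
(year, month) pair to a position in the 400-year Gregorian cycle, so stepping
the year by 100 or 4 simply advances the index modulo 400 without recomputing.

```cpp
#include <cstdio>

// Restatement of the year_index arithmetic; the conditional add replaces the
// old double-modulo trick because C++ % can yield negative values for
// negative years.
constexpr int YearIndex(long long y, int m) {
  const int yi = static_cast<int>((y + (m > 2)) % 400);
  return yi < 0 ? yi + 400 : yi;
}

int main() {
  // Advancing a century moves the index by 100 within the 400-year cycle.
  int yi = YearIndex(1899, 1);  // 299
  yi += 100;
  if (yi >= 400) yi -= 400;     // 399, matching a fresh computation:
  std::printf("%d %d\n", yi, YearIndex(1999, 1));  // prints "399 399"
}
```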