Commit 3aa3377e by Derek Mauro Committed by Copybara-Service

Fixed Windows DLL builds of test targets

This is a heavily modified version of
https://github.com/abseil/abseil-cpp/pull/1445,
which adds some missing test libraries to the test DLL.

Unlike #1445, this change moves several global variables out of
headers in which they did not need to be declared.

For instance, cord_btree_exhaustive_validation was a global
defined/declared in cord_internal, but only used in cord_rep_btree
and its test.

cordz_handle defined a queue in its header even though the queue did not
need to be visible there, which also led to ODR problems.

The Spinlock used in CordzHandle is replaced with a Mutex. This was
originally a Mutex, but Chromium asked us to change it to a Spinlock
to avoid a static initializer. After this change, the static
initializer is no longer an issue.

#1407

PiperOrigin-RevId: 531516991
Change-Id: I0e431a193698b20ba03fac6e414c26f153f330a7
parent 2526926b
...@@ -589,6 +589,10 @@ set(ABSL_INTERNAL_TEST_DLL_FILES ...@@ -589,6 +589,10 @@ set(ABSL_INTERNAL_TEST_DLL_FILES
"hash/hash_testing.h" "hash/hash_testing.h"
"log/scoped_mock_log.cc" "log/scoped_mock_log.cc"
"log/scoped_mock_log.h" "log/scoped_mock_log.h"
"random/internal/chi_square.cc"
"random/internal/chi_square.h"
"random/internal/distribution_test_util.cc"
"random/internal/distribution_test_util.h"
"random/internal/mock_helpers.h" "random/internal/mock_helpers.h"
"random/internal/mock_overload_set.h" "random/internal/mock_overload_set.h"
"random/mocking_bit_gen.h" "random/mocking_bit_gen.h"
......
...@@ -413,6 +413,10 @@ function(absl_cc_test) ...@@ -413,6 +413,10 @@ function(absl_cc_test)
DEPS ${ABSL_CC_TEST_DEPS} DEPS ${ABSL_CC_TEST_DEPS}
OUTPUT ABSL_CC_TEST_DEPS OUTPUT ABSL_CC_TEST_DEPS
) )
absl_internal_dll_targets(
DEPS ${ABSL_CC_TEST_LINKOPTS}
OUTPUT ABSL_CC_TEST_LINKOPTS
)
else() else()
target_compile_definitions(${_NAME} target_compile_definitions(${_NAME}
PUBLIC PUBLIC
......
...@@ -33,7 +33,6 @@ ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled( ...@@ -33,7 +33,6 @@ ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault); kCordEnableRingBufferDefault);
ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled( ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault); kCordShallowSubcordsDefault);
ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
void LogFatalNodeType(CordRep* rep) { void LogFatalNodeType(CordRep* rep) {
ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ", ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ",
......
...@@ -69,12 +69,6 @@ enum CordFeatureDefaults { ...@@ -69,12 +69,6 @@ enum CordFeatureDefaults {
extern std::atomic<bool> cord_ring_buffer_enabled; extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled; extern std::atomic<bool> shallow_subcords_enabled;
// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
// in debug assertions, and code that calls `IsValid()` explicitly. By default,
// assertions should be relatively cheap and AssertValid() can easily lead to
// O(n^2) complexity as recursive / full tree validation is O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;
inline void enable_cord_ring_buffer(bool enable) { inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed); cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
} }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "absl/strings/internal/cord_rep_btree.h" #include "absl/strings/internal/cord_rep_btree.h"
#include <atomic>
#include <cassert> #include <cassert>
#include <cstdint> #include <cstdint>
#include <iostream> #include <iostream>
...@@ -49,9 +50,7 @@ using CopyResult = CordRepBtree::CopyResult; ...@@ -49,9 +50,7 @@ using CopyResult = CordRepBtree::CopyResult;
constexpr auto kFront = CordRepBtree::kFront; constexpr auto kFront = CordRepBtree::kFront;
constexpr auto kBack = CordRepBtree::kBack; constexpr auto kBack = CordRepBtree::kBack;
inline bool exhaustive_validation() { ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
}
// Implementation of the various 'Dump' functions. // Implementation of the various 'Dump' functions.
// Prints the entire tree structure or 'rep'. External callers should // Prints the entire tree structure or 'rep'. External callers should
...@@ -362,6 +361,15 @@ struct StackOperations { ...@@ -362,6 +361,15 @@ struct StackOperations {
} // namespace } // namespace
void SetCordBtreeExhaustiveValidation(bool do_exaustive_validation) {
cord_btree_exhaustive_validation.store(do_exaustive_validation,
std::memory_order_relaxed);
}
bool IsCordBtreeExhaustiveValidationEnabled() {
return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
}
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label, void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
bool include_contents, std::ostream& stream) { bool include_contents, std::ostream& stream) {
stream << "===================================\n"; stream << "===================================\n";
...@@ -450,7 +458,8 @@ bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) { ...@@ -450,7 +458,8 @@ bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
child_length += edge->length; child_length += edge->length;
} }
NODE_CHECK_EQ(child_length, tree->length); NODE_CHECK_EQ(child_length, tree->length);
if ((!shallow || exhaustive_validation()) && tree->height() > 0) { if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
tree->height() > 0) {
for (CordRep* edge : tree->Edges()) { for (CordRep* edge : tree->Edges()) {
if (!IsValid(edge->btree(), shallow)) return false; if (!IsValid(edge->btree(), shallow)) return false;
} }
......
...@@ -32,6 +32,14 @@ namespace absl { ...@@ -32,6 +32,14 @@ namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace cord_internal { namespace cord_internal {
// `SetCordBtreeExhaustiveValidation()` can be set to force exhaustive
// validation in debug assertions, and code that calls `IsValid()`
// explicitly. By default, assertions should be relatively cheap and
// AssertValid() can easily lead to O(n^2) complexity as recursive / full tree
// validation is O(n).
void SetCordBtreeExhaustiveValidation(bool do_exaustive_validation);
bool IsCordBtreeExhaustiveValidationEnabled();
class CordRepBtreeNavigator; class CordRepBtreeNavigator;
// CordRepBtree is as the name implies a btree implementation of a Cordrep tree. // CordRepBtree is as the name implies a btree implementation of a Cordrep tree.
......
...@@ -1355,9 +1355,9 @@ TEST(CordRepBtreeTest, AssertValid) { ...@@ -1355,9 +1355,9 @@ TEST(CordRepBtreeTest, AssertValid) {
TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) { TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
// Restore exhaustive validation on any exit. // Restore exhaustive validation on any exit.
const bool exhaustive_validation = cord_btree_exhaustive_validation.load(); const bool exhaustive_validation = IsCordBtreeExhaustiveValidationEnabled();
auto cleanup = absl::MakeCleanup([exhaustive_validation] { auto cleanup = absl::MakeCleanup([exhaustive_validation] {
cord_btree_exhaustive_validation.store(exhaustive_validation); SetCordBtreeExhaustiveValidation(exhaustive_validation);
}); });
// Create a tree of at least 2 levels, and mess with the original flat, which // Create a tree of at least 2 levels, and mess with the original flat, which
...@@ -1372,7 +1372,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) { ...@@ -1372,7 +1372,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
} }
flat->length = 100; flat->length = 100;
cord_btree_exhaustive_validation.store(false); SetCordBtreeExhaustiveValidation(false);
EXPECT_FALSE(CordRepBtree::IsValid(tree)); EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_TRUE(CordRepBtree::IsValid(tree, true)); EXPECT_TRUE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false)); EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
...@@ -1382,7 +1382,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) { ...@@ -1382,7 +1382,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, false), ".*"); EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, false), ".*");
#endif #endif
cord_btree_exhaustive_validation.store(true); SetCordBtreeExhaustiveValidation(true);
EXPECT_FALSE(CordRepBtree::IsValid(tree)); EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_FALSE(CordRepBtree::IsValid(tree, true)); EXPECT_FALSE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false)); EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
......
...@@ -16,34 +16,60 @@ ...@@ -16,34 +16,60 @@
#include <atomic> #include <atomic>
#include "absl/base/internal/raw_logging.h" // For ABSL_RAW_CHECK #include "absl/base/internal/raw_logging.h" // For ABSL_RAW_CHECK
#include "absl/base/internal/spinlock.h" #include "absl/synchronization/mutex.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
namespace cord_internal { namespace cord_internal {
using ::absl::base_internal::SpinLockHolder; namespace {
ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(absl::kConstInit); struct Queue {
Queue() = default;
absl::Mutex mutex;
std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
// Returns true if this delete queue is empty. This method does not acquire
// the lock, but does a 'load acquire' observation on the delete queue tail.
// It is used inside Delete() to check for the presence of a delete queue
// without holding the lock. The assumption is that the caller is in the
// state of 'being deleted', and can not be newly discovered by a concurrent
// 'being constructed' snapshot instance. Practically, this means that any
// such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
// before / after' semantics and atomic fences.
bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
return dq_tail.load(std::memory_order_acquire) == nullptr;
}
};
static Queue* GlobalQueue() {
static Queue* global_queue = new Queue;
return global_queue;
}
} // namespace
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) { CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
Queue* global_queue = GlobalQueue();
if (is_snapshot) { if (is_snapshot) {
SpinLockHolder lock(&queue_->mutex); MutexLock lock(&global_queue->mutex);
CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire); CordzHandle* dq_tail =
global_queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) { if (dq_tail != nullptr) {
dq_prev_ = dq_tail; dq_prev_ = dq_tail;
dq_tail->dq_next_ = this; dq_tail->dq_next_ = this;
} }
queue_->dq_tail.store(this, std::memory_order_release); global_queue->dq_tail.store(this, std::memory_order_release);
} }
} }
CordzHandle::~CordzHandle() { CordzHandle::~CordzHandle() {
ODRCheck(); Queue* global_queue = GlobalQueue();
if (is_snapshot_) { if (is_snapshot_) {
std::vector<CordzHandle*> to_delete; std::vector<CordzHandle*> to_delete;
{ {
SpinLockHolder lock(&queue_->mutex); MutexLock lock(&global_queue->mutex);
CordzHandle* next = dq_next_; CordzHandle* next = dq_next_;
if (dq_prev_ == nullptr) { if (dq_prev_ == nullptr) {
// We were head of the queue, delete every CordzHandle until we reach // We were head of the queue, delete every CordzHandle until we reach
...@@ -59,7 +85,7 @@ CordzHandle::~CordzHandle() { ...@@ -59,7 +85,7 @@ CordzHandle::~CordzHandle() {
if (next) { if (next) {
next->dq_prev_ = dq_prev_; next->dq_prev_ = dq_prev_;
} else { } else {
queue_->dq_tail.store(dq_prev_, std::memory_order_release); global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
} }
} }
for (CordzHandle* handle : to_delete) { for (CordzHandle* handle : to_delete) {
...@@ -69,16 +95,15 @@ CordzHandle::~CordzHandle() { ...@@ -69,16 +95,15 @@ CordzHandle::~CordzHandle() {
} }
bool CordzHandle::SafeToDelete() const { bool CordzHandle::SafeToDelete() const {
return is_snapshot_ || queue_->IsEmpty(); return is_snapshot_ || GlobalQueue()->IsEmpty();
} }
void CordzHandle::Delete(CordzHandle* handle) { void CordzHandle::Delete(CordzHandle* handle) {
assert(handle); assert(handle);
if (handle) { if (handle) {
handle->ODRCheck(); Queue* const queue = GlobalQueue();
Queue* const queue = handle->queue_;
if (!handle->SafeToDelete()) { if (!handle->SafeToDelete()) {
SpinLockHolder lock(&queue->mutex); MutexLock lock(&queue->mutex);
CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire); CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) { if (dq_tail != nullptr) {
handle->dq_prev_ = dq_tail; handle->dq_prev_ = dq_tail;
...@@ -93,8 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) { ...@@ -93,8 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) {
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() { std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
std::vector<const CordzHandle*> handles; std::vector<const CordzHandle*> handles;
SpinLockHolder lock(&global_queue_.mutex); Queue* global_queue = GlobalQueue();
CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire); MutexLock lock(&global_queue->mutex);
CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) { for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
handles.push_back(p); handles.push_back(p);
} }
...@@ -103,13 +129,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() { ...@@ -103,13 +129,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
bool CordzHandle::DiagnosticsHandleIsSafeToInspect( bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
const CordzHandle* handle) const { const CordzHandle* handle) const {
ODRCheck();
if (!is_snapshot_) return false; if (!is_snapshot_) return false;
if (handle == nullptr) return true; if (handle == nullptr) return true;
if (handle->is_snapshot_) return false; if (handle->is_snapshot_) return false;
bool snapshot_found = false; bool snapshot_found = false;
SpinLockHolder lock(&queue_->mutex); Queue* global_queue = GlobalQueue();
for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) { MutexLock lock(&global_queue->mutex);
for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
if (p == handle) return !snapshot_found; if (p == handle) return !snapshot_found;
if (p == this) snapshot_found = true; if (p == this) snapshot_found = true;
} }
...@@ -119,13 +145,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect( ...@@ -119,13 +145,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
std::vector<const CordzHandle*> std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() { CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
ODRCheck();
std::vector<const CordzHandle*> handles; std::vector<const CordzHandle*> handles;
if (!is_snapshot()) { if (!is_snapshot()) {
return handles; return handles;
} }
SpinLockHolder lock(&queue_->mutex); Queue* global_queue = GlobalQueue();
MutexLock lock(&global_queue->mutex);
for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) { for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
if (!p->is_snapshot()) { if (!p->is_snapshot()) {
handles.push_back(p); handles.push_back(p);
......
...@@ -20,8 +20,6 @@ ...@@ -20,8 +20,6 @@
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/synchronization/mutex.h"
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
...@@ -79,37 +77,6 @@ class CordzHandle { ...@@ -79,37 +77,6 @@ class CordzHandle {
virtual ~CordzHandle(); virtual ~CordzHandle();
private: private:
// Global queue data. CordzHandle stores a pointer to the global queue
// instance to harden against ODR violations.
struct Queue {
constexpr explicit Queue(absl::ConstInitType)
: mutex(absl::kConstInit,
absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
absl::base_internal::SpinLock mutex;
std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
// Returns true if this delete queue is empty. This method does not acquire
// the lock, but does a 'load acquire' observation on the delete queue tail.
// It is used inside Delete() to check for the presence of a delete queue
// without holding the lock. The assumption is that the caller is in the
// state of 'being deleted', and can not be newly discovered by a concurrent
// 'being constructed' snapshot instance. Practically, this means that any
// such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
// before / after' semantics and atomic fences.
bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
return dq_tail.load(std::memory_order_acquire) == nullptr;
}
};
void ODRCheck() const {
#ifndef NDEBUG
ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
#endif
}
ABSL_CONST_INIT static Queue global_queue_;
Queue* const queue_ = &global_queue_;
const bool is_snapshot_; const bool is_snapshot_;
// dq_prev_ and dq_next_ require the global queue mutex to be held. // dq_prev_ and dq_next_ require the global queue mutex to be held.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment