Commit cffa80b9 by Abseil Team; committed by Copybara-Service

absl: reformat Mutex-related files

Reformat Mutex-related files so that incremental formatting changes
don't distract during review of logical changes.
These files are subtle and any unnecessary diffs make reviews harder.

No changes besides running clang-format.

PiperOrigin-RevId: 541981737
Change-Id: I41cccb7a97158c78d17adaff6fe553c2c9c2b9ed
parent 5668c20e
...@@ -58,18 +58,19 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { ...@@ -58,18 +58,19 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// that protected visibility is unsupported. // that protected visibility is unsupported.
ABSL_CONST_INIT // Must come before __attribute__((visibility("protected"))) ABSL_CONST_INIT // Must come before __attribute__((visibility("protected")))
#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) #if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected"))) __attribute__((visibility("protected")))
#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) #endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS #if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster. // Prefer __thread to thread_local as benchmarks indicate it is a bit
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; // faster.
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL) #elif defined(ABSL_HAVE_THREAD_LOCAL)
thread_local ThreadIdentity* thread_identity_ptr = nullptr; thread_local ThreadIdentity* thread_identity_ptr = nullptr;
#endif // ABSL_PER_THREAD_TLS #endif // ABSL_PER_THREAD_TLS
#endif // TLS or CPP11 #endif // TLS or CPP11
void SetCurrentThreadIdentity( void SetCurrentThreadIdentity(ThreadIdentity* identity,
ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) { ThreadIdentityReclaimerFunction reclaimer) {
assert(CurrentThreadIdentityIfPresent() == nullptr); assert(CurrentThreadIdentityIfPresent() == nullptr);
// Associate our destructor. // Associate our destructor.
// NOTE: This call to pthread_setspecific is currently the only immovable // NOTE: This call to pthread_setspecific is currently the only immovable
......
...@@ -62,8 +62,8 @@ struct PerThreadSynch { ...@@ -62,8 +62,8 @@ struct PerThreadSynch {
return reinterpret_cast<ThreadIdentity*>(this); return reinterpret_cast<ThreadIdentity*>(this);
} }
PerThreadSynch *next; // Circular waiter queue; initialized to 0. PerThreadSynch* next; // Circular waiter queue; initialized to 0.
PerThreadSynch *skip; // If non-zero, all entries in Mutex queue PerThreadSynch* skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same // up to and including "skip" have same
// condition as this, and will be woken later // condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker bool may_skip; // if false while on mutex queue, a mutex unlocker
...@@ -104,10 +104,7 @@ struct PerThreadSynch { ...@@ -104,10 +104,7 @@ struct PerThreadSynch {
// //
// Transitions from kAvailable to kQueued require no barrier, they // Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex. // are externally ordered by the Mutex.
enum State { enum State { kAvailable, kQueued };
kAvailable,
kQueued
};
std::atomic<State> state; std::atomic<State> state;
// The wait parameters of the current wait. waitp is null if the // The wait parameters of the current wait. waitp is null if the
...@@ -129,7 +126,7 @@ struct PerThreadSynch { ...@@ -129,7 +126,7 @@ struct PerThreadSynch {
// Locks held; used during deadlock detection. // Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks; SynchLocksHeld* all_locks;
}; };
// The instances of this class are allocated in NewThreadIdentity() with an // The instances of this class are allocated in NewThreadIdentity() with an
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
// limitations under the License. // limitations under the License.
#include <stdint.h> #include <stdint.h>
#include <new> #include <new>
// This file is a no-op if the required LowLevelAlloc support is missing. // This file is a no-op if the required LowLevelAlloc support is missing.
......
...@@ -98,15 +98,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ...@@ -98,15 +98,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)> absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data; submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)( ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)> const char* msg, const void* obj, int64_t wait_cycles)>
mutex_tracer; mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)> absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
cond_var_tracer; cond_var_tracer;
} // namespace } // namespace
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock, bool locking, bool trylock,
bool read_lock); bool read_lock);
...@@ -114,12 +114,12 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) { ...@@ -114,12 +114,12 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn); submit_profile_data.Store(fn);
} }
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj, void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)) { int64_t wait_cycles)) {
mutex_tracer.Store(fn); mutex_tracer.Store(fn);
} }
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) { void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
cond_var_tracer.Store(fn); cond_var_tracer.Store(fn);
} }
...@@ -141,7 +141,7 @@ absl::Duration MeasureTimeToYield() { ...@@ -141,7 +141,7 @@ absl::Duration MeasureTimeToYield() {
return absl::Now() - before; return absl::Now() - before;
} }
const MutexGlobals &GetMutexGlobals() { const MutexGlobals& GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data; ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() { absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
const int num_cpus = absl::base_internal::NumCPUs(); const int num_cpus = absl::base_internal::NumCPUs();
...@@ -212,8 +212,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits, ...@@ -212,8 +212,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed); v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != bits && } while ((v & bits) != bits &&
((v & wait_until_clear) != 0 || ((v & wait_until_clear) != 0 ||
!pv->compare_exchange_weak(v, v | bits, !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed))); std::memory_order_relaxed)));
} }
...@@ -228,8 +227,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits, ...@@ -228,8 +227,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed); v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != 0 && } while ((v & bits) != 0 &&
((v & wait_until_clear) != 0 || ((v & wait_until_clear) != 0 ||
!pv->compare_exchange_weak(v, v & ~bits, !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed))); std::memory_order_relaxed)));
} }
...@@ -240,7 +238,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu( ...@@ -240,7 +238,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Graph used to detect deadlocks. // Graph used to detect deadlocks.
ABSL_CONST_INIT static GraphCycles *deadlock_graph ABSL_CONST_INIT static GraphCycles* deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu); ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------ //------------------------------------------------------------------
...@@ -284,7 +282,7 @@ enum { // Event flags ...@@ -284,7 +282,7 @@ enum { // Event flags
// Properties of the events. // Properties of the events.
static const struct { static const struct {
int flags; int flags;
const char *msg; const char* msg;
} event_properties[] = { } event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "}, {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "}, {0, "TryLock failed "},
...@@ -314,7 +312,7 @@ static struct SynchEvent { // this is a trivial hash table for the events ...@@ -314,7 +312,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
int refcount ABSL_GUARDED_BY(synch_event_mu); int refcount ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains // buckets have linear, 0-terminated chains
SynchEvent *next ABSL_GUARDED_BY(synch_event_mu); SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization // Constant after initialization
uintptr_t masked_addr; // object at this address is called "name" uintptr_t masked_addr; // object at this address is called "name"
...@@ -322,13 +320,13 @@ static struct SynchEvent { // this is a trivial hash table for the events ...@@ -322,13 +320,13 @@ static struct SynchEvent { // this is a trivial hash table for the events
// No explicit synchronization used. Instead we assume that the // No explicit synchronization used. Instead we assume that the
// client who enables/disables invariants/logging on a Mutex does so // client who enables/disables invariants/logging on a Mutex does so
// while the Mutex is not being concurrently accessed by others. // while the Mutex is not being concurrently accessed by others.
void (*invariant)(void *arg); // called on each event void (*invariant)(void* arg); // called on each event
void *arg; // first arg to (*invariant)() void* arg; // first arg to (*invariant)()
bool log; // logging turned on bool log; // logging turned on
// Constant after initialization // Constant after initialization
char name[1]; // actually longer---NUL-terminated string char name[1]; // actually longer---NUL-terminated string
} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu); }* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it, // Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing // set "bits" in the word there (waiting until lockbit is clear before doing
...@@ -337,11 +335,11 @@ static struct SynchEvent { // this is a trivial hash table for the events ...@@ -337,11 +335,11 @@ static struct SynchEvent { // this is a trivial hash table for the events
// the string name is copied into it. // the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent // When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent. // is set in the mutex word, and similarly for condition variables and kCVEvent.
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
const char *name, intptr_t bits, const char* name, intptr_t bits,
intptr_t lockbit) { intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent *e; SynchEvent* e;
// first look for existing SynchEvent struct.. // first look for existing SynchEvent struct..
synch_event_mu.Lock(); synch_event_mu.Lock();
for (e = synch_event[h]; for (e = synch_event[h];
...@@ -353,7 +351,7 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, ...@@ -353,7 +351,7 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
name = ""; name = "";
} }
size_t l = strlen(name); size_t l = strlen(name);
e = reinterpret_cast<SynchEvent *>( e = reinterpret_cast<SynchEvent*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l)); base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
e->refcount = 2; // one for return value, one for linked list e->refcount = 2; // one for return value, one for linked list
e->masked_addr = base_internal::HidePtr(addr); e->masked_addr = base_internal::HidePtr(addr);
...@@ -372,12 +370,12 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, ...@@ -372,12 +370,12 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
} }
// Deallocate the SynchEvent *e, whose refcount has fallen to zero. // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
static void DeleteSynchEvent(SynchEvent *e) { static void DeleteSynchEvent(SynchEvent* e) {
base_internal::LowLevelAlloc::Free(e); base_internal::LowLevelAlloc::Free(e);
} }
// Decrement the reference count of *e, or do nothing if e==null. // Decrement the reference count of *e, or do nothing if e==null.
static void UnrefSynchEvent(SynchEvent *e) { static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) { if (e != nullptr) {
synch_event_mu.Lock(); synch_event_mu.Lock();
bool del = (--(e->refcount) == 0); bool del = (--(e->refcount) == 0);
...@@ -391,11 +389,11 @@ static void UnrefSynchEvent(SynchEvent *e) { ...@@ -391,11 +389,11 @@ static void UnrefSynchEvent(SynchEvent *e) {
// Forget the mapping from the object (Mutex or CondVar) at address addr // Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so). // is clear before doing so).
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits, static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
intptr_t lockbit) { intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent **pe; SynchEvent** pe;
SynchEvent *e; SynchEvent* e;
synch_event_mu.Lock(); synch_event_mu.Lock();
for (pe = &synch_event[h]; for (pe = &synch_event[h];
(e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr); (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
...@@ -416,9 +414,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits, ...@@ -416,9 +414,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
// Return a refcounted reference to the SynchEvent of the object at address // Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called. // called.
static SynchEvent *GetSynchEvent(const void *addr) { static SynchEvent* GetSynchEvent(const void* addr) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent *e; SynchEvent* e;
synch_event_mu.Lock(); synch_event_mu.Lock();
for (e = synch_event[h]; for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr); e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
...@@ -433,17 +431,17 @@ static SynchEvent *GetSynchEvent(const void *addr) { ...@@ -433,17 +431,17 @@ static SynchEvent *GetSynchEvent(const void *addr) {
// Called when an event "ev" occurs on a Mutex of CondVar "obj" // Called when an event "ev" occurs on a Mutex of CondVar "obj"
// if event recording is on // if event recording is on
static void PostSynchEvent(void *obj, int ev) { static void PostSynchEvent(void* obj, int ev) {
SynchEvent *e = GetSynchEvent(obj); SynchEvent* e = GetSynchEvent(obj);
// logging is on if event recording is on and either there's no event struct, // logging is on if event recording is on and either there's no event struct,
// or it explicitly says to log // or it explicitly says to log
if (e == nullptr || e->log) { if (e == nullptr || e->log) {
void *pcs[40]; void* pcs[40];
int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1); int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
// A buffer with enough space for the ASCII for all the PCs, even on a // A buffer with enough space for the ASCII for all the PCs, even on a
// 64-bit machine. // 64-bit machine.
char buffer[ABSL_ARRAYSIZE(pcs) * 24]; char buffer[ABSL_ARRAYSIZE(pcs) * 24];
int pos = snprintf(buffer, sizeof (buffer), " @"); int pos = snprintf(buffer, sizeof(buffer), " @");
for (int i = 0; i != n; i++) { for (int i = 0; i != n; i++) {
int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos), int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
" %p", pcs[i]); " %p", pcs[i]);
...@@ -465,13 +463,13 @@ static void PostSynchEvent(void *obj, int ev) { ...@@ -465,13 +463,13 @@ static void PostSynchEvent(void *obj, int ev) {
// get false positive race reports later. // get false positive race reports later.
// Reuse EvalConditionAnnotated to properly call into user code. // Reuse EvalConditionAnnotated to properly call into user code.
struct local { struct local {
static bool pred(SynchEvent *ev) { static bool pred(SynchEvent* ev) {
(*ev->invariant)(ev->arg); (*ev->invariant)(ev->arg);
return false; return false;
} }
}; };
Condition cond(&local::pred, e); Condition cond(&local::pred, e);
Mutex *mu = static_cast<Mutex *>(obj); Mutex* mu = static_cast<Mutex*>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0; const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0; const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0; const bool read_lock = (flags & SYNCH_F_R) != 0;
...@@ -497,10 +495,10 @@ static void PostSynchEvent(void *obj, int ev) { ...@@ -497,10 +495,10 @@ static void PostSynchEvent(void *obj, int ev) {
// PerThreadSynch struct points at the most recent SynchWaitParams struct when // PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue. // the thread is on a Mutex's waiter queue.
struct SynchWaitParams { struct SynchWaitParams {
SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg, SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
KernelTimeout timeout_arg, Mutex *cvmu_arg, KernelTimeout timeout_arg, Mutex* cvmu_arg,
PerThreadSynch *thread_arg, PerThreadSynch* thread_arg,
std::atomic<intptr_t> *cv_word_arg) std::atomic<intptr_t>* cv_word_arg)
: how(how_arg), : how(how_arg),
cond(cond_arg), cond(cond_arg),
timeout(timeout_arg), timeout(timeout_arg),
...@@ -511,18 +509,18 @@ struct SynchWaitParams { ...@@ -511,18 +509,18 @@ struct SynchWaitParams {
should_submit_contention_data(false) {} should_submit_contention_data(false) {}
const Mutex::MuHow how; // How this thread needs to wait. const Mutex::MuHow how; // How this thread needs to wait.
const Condition *cond; // The condition that this thread is waiting for. const Condition* cond; // The condition that this thread is waiting for.
// In Mutex, this field is set to zero if a timeout // In Mutex, this field is set to zero if a timeout
// expires. // expires.
KernelTimeout timeout; // timeout expiry---absolute time KernelTimeout timeout; // timeout expiry---absolute time
// In Mutex, this field is set to zero if a timeout // In Mutex, this field is set to zero if a timeout
// expires. // expires.
Mutex *const cvmu; // used for transfer from cond var to mutex Mutex* const cvmu; // used for transfer from cond var to mutex
PerThreadSynch *const thread; // thread that is waiting PerThreadSynch* const thread; // thread that is waiting
// If not null, thread should be enqueued on the CondVar whose state // If not null, thread should be enqueued on the CondVar whose state
// word is cv_word instead of queueing normally on the Mutex. // word is cv_word instead of queueing normally on the Mutex.
std::atomic<intptr_t> *cv_word; std::atomic<intptr_t>* cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex. // to contend for the mutex.
...@@ -533,7 +531,7 @@ struct SynchLocksHeld { ...@@ -533,7 +531,7 @@ struct SynchLocksHeld {
int n; // number of valid entries in locks[] int n; // number of valid entries in locks[]
bool overflow; // true iff we overflowed the array at some point bool overflow; // true iff we overflowed the array at some point
struct { struct {
Mutex *mu; // lock acquired Mutex* mu; // lock acquired
int32_t count; // times acquired int32_t count; // times acquired
GraphId id; // deadlock_graph id of acquired lock GraphId id; // deadlock_graph id of acquired lock
} locks[40]; } locks[40];
...@@ -545,11 +543,11 @@ struct SynchLocksHeld { ...@@ -545,11 +543,11 @@ struct SynchLocksHeld {
// A sentinel value in lists that is not 0. // A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list". // A 0 value is used to mean "not on a list".
static PerThreadSynch *const kPerThreadSynchNull = static PerThreadSynch* const kPerThreadSynchNull =
reinterpret_cast<PerThreadSynch *>(1); reinterpret_cast<PerThreadSynch*>(1);
static SynchLocksHeld *LocksHeldAlloc() { static SynchLocksHeld* LocksHeldAlloc() {
SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>( SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld))); base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0; ret->n = 0;
ret->overflow = false; ret->overflow = false;
...@@ -557,24 +555,24 @@ static SynchLocksHeld *LocksHeldAlloc() { ...@@ -557,24 +555,24 @@ static SynchLocksHeld *LocksHeldAlloc() {
} }
// Return the PerThreadSynch-struct for this thread. // Return the PerThreadSynch-struct for this thread.
static PerThreadSynch *Synch_GetPerThread() { static PerThreadSynch* Synch_GetPerThread() {
ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity(); ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch; return &identity->per_thread_synch;
} }
static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) { static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
if (mu) { if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
} }
PerThreadSynch *w = Synch_GetPerThread(); PerThreadSynch* w = Synch_GetPerThread();
if (mu) { if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0); ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
} }
return w; return w;
} }
static SynchLocksHeld *Synch_GetAllLocks() { static SynchLocksHeld* Synch_GetAllLocks() {
PerThreadSynch *s = Synch_GetPerThread(); PerThreadSynch* s = Synch_GetPerThread();
if (s->all_locks == nullptr) { if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity. s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
} }
...@@ -582,7 +580,7 @@ static SynchLocksHeld *Synch_GetAllLocks() { ...@@ -582,7 +580,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
} }
// Post on "w"'s associated PerThreadSem. // Post on "w"'s associated PerThreadSem.
void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
if (mu) { if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
// We miss synchronization around passing PerThreadSynch between threads // We miss synchronization around passing PerThreadSynch between threads
...@@ -598,7 +596,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { ...@@ -598,7 +596,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
} }
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired. // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) { bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
if (mu) { if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
} }
...@@ -619,7 +617,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) { ...@@ -619,7 +617,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
// Mutex code checking that the "waitp" field has not been reused. // Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() { void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
// Fix the per-thread state only if it exists. // Fix the per-thread state only if it exists.
ThreadIdentity *identity = CurrentThreadIdentityIfPresent(); ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) { if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true; identity->per_thread_synch.suppress_fatal_errors = true;
} }
...@@ -756,8 +754,8 @@ Mutex::~Mutex() { ...@@ -756,8 +754,8 @@ Mutex::~Mutex() {
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
} }
void Mutex::EnableDebugLog(const char *name) { void Mutex::EnableDebugLog(const char* name) {
SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin); SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true; e->log = true;
UnrefSynchEvent(e); UnrefSynchEvent(e);
} }
...@@ -766,11 +764,10 @@ void EnableMutexInvariantDebugging(bool enabled) { ...@@ -766,11 +764,10 @@ void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release); synch_check_invariants.store(enabled, std::memory_order_release);
} }
void Mutex::EnableInvariantDebugging(void (*invariant)(void *), void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
void *arg) {
if (synch_check_invariants.load(std::memory_order_acquire) && if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) { invariant != nullptr) {
SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin); SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant; e->invariant = invariant;
e->arg = arg; e->arg = arg;
UnrefSynchEvent(e); UnrefSynchEvent(e);
...@@ -786,15 +783,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) { ...@@ -786,15 +783,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
// waiters with the same condition, type of lock, and thread priority. // waiters with the same condition, type of lock, and thread priority.
// //
// Requires that x and y be waiting on the same Mutex queue. // Requires that x and y be waiting on the same Mutex queue.
static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) { static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority && return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond); Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
} }
// Given the contents of a mutex word containing a PerThreadSynch pointer, // Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer. // return the pointer.
static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) { static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
return reinterpret_cast<PerThreadSynch *>(v & kMuHigh); return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
} }
// The next several routines maintain the per-thread next and skip fields // The next several routines maintain the per-thread next and skip fields
...@@ -852,10 +849,10 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) { ...@@ -852,10 +849,10 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// except those in the added node and the former "head" node. This implies // except those in the added node and the former "head" node. This implies
// that the new node is added after head, and so must be the new head or the // that the new node is added after head, and so must be the new head or the
// new front of the queue. // new front of the queue.
static PerThreadSynch *Skip(PerThreadSynch *x) { static PerThreadSynch* Skip(PerThreadSynch* x) {
PerThreadSynch *x0 = nullptr; PerThreadSynch* x0 = nullptr;
PerThreadSynch *x1 = x; PerThreadSynch* x1 = x;
PerThreadSynch *x2 = x->skip; PerThreadSynch* x2 = x->skip;
if (x2 != nullptr) { if (x2 != nullptr) {
// Each iteration attempts to advance sequence (x0,x1,x2) to next sequence // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
// such that x1 == x0->skip && x2 == x1->skip // such that x1 == x0->skip && x2 == x1->skip
...@@ -871,7 +868,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) { ...@@ -871,7 +868,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) {
// The latter is going to be removed out of order, because of a timeout. // The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed", // Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does. // and fix it if it does.
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) { static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
if (to_be_removed->skip != nullptr) { if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip; // can skip past to_be_removed ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
...@@ -883,7 +880,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) { ...@@ -883,7 +880,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
} }
} }
static void CondVarEnqueue(SynchWaitParams *waitp); static void CondVarEnqueue(SynchWaitParams* waitp);
// Enqueue thread "waitp->thread" on a waiter queue. // Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr // Called with mutex spinlock held if head != nullptr
...@@ -904,8 +901,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp); ...@@ -904,8 +901,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp);
// returned. This mechanism is used by CondVar to queue a thread on the // returned. This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait(). // condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr). // In this case, Enqueue() can return nullptr (if head==nullptr).
static PerThreadSynch *Enqueue(PerThreadSynch *head, static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
SynchWaitParams *waitp, intptr_t mu, int flags) { intptr_t mu, int flags) {
// If we have been given a cv_word, call CondVarEnqueue() and return // If we have been given a cv_word, call CondVarEnqueue() and return
// the previous head of the Mutex waiter queue. // the previous head of the Mutex waiter queue.
if (waitp->cv_word != nullptr) { if (waitp->cv_word != nullptr) {
...@@ -913,7 +910,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, ...@@ -913,7 +910,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
return head; return head;
} }
PerThreadSynch *s = waitp->thread; PerThreadSynch* s = waitp->thread;
ABSL_RAW_CHECK( ABSL_RAW_CHECK(
s->waitp == nullptr || // normal case s->waitp == nullptr || // normal case
s->waitp == waitp || // Fer()---transfer from condition variable s->waitp == waitp || // Fer()---transfer from condition variable
...@@ -949,7 +946,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, ...@@ -949,7 +946,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
s->maybe_unlocking = false; // no one is searching an empty list s->maybe_unlocking = false; // no one is searching an empty list
head = s; // s is new head head = s; // s is new head
} else { } else {
PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
if (s->priority > head->priority) { // s's priority is above head's if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front. // try to put s in priority-fifo order, or failing that at the front.
...@@ -960,7 +957,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, ...@@ -960,7 +957,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// Within a skip chain, all waiters have the same priority, so we can // Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower // skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued. // priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after PerThreadSynch* advance_to = head; // next value of enqueue_after
do { do {
enqueue_after = advance_to; enqueue_after = advance_to;
// (side-effect: optimizes skip chain) // (side-effect: optimizes skip chain)
...@@ -1022,8 +1019,8 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, ...@@ -1022,8 +1019,8 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// whose last element is head. The new head element is returned, or null // whose last element is head. The new head element is returned, or null
// if the list is made empty. // if the list is made empty.
// Dequeue is called with both spinlock and Mutex held. // Dequeue is called with both spinlock and Mutex held.
static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) { static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
PerThreadSynch *w = pw->next; PerThreadSynch* w = pw->next;
pw->next = w->next; // snip w out of list pw->next = w->next; // snip w out of list
if (head == w) { // we removed the head if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
...@@ -1045,11 +1042,11 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) { ...@@ -1045,11 +1042,11 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
// singly-linked list wake_list in the order found. Assumes that // singly-linked list wake_list in the order found. Assumes that
// there is only one such element if the element has how == kExclusive. // there is only one such element if the element has how == kExclusive.
// Return the new head. // Return the new head.
static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head, static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
PerThreadSynch *pw, PerThreadSynch* pw,
PerThreadSynch **wake_tail) { PerThreadSynch** wake_tail) {
PerThreadSynch *orig_h = head; PerThreadSynch* orig_h = head;
PerThreadSynch *w = pw->next; PerThreadSynch* w = pw->next;
bool skipped = false; bool skipped = false;
do { do {
if (w->wake) { // remove this element if (w->wake) { // remove this element
...@@ -1083,7 +1080,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head, ...@@ -1083,7 +1080,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex. // Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list. // Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) { void Mutex::TryRemove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling; SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock // acquire spinlock & lock
...@@ -1091,10 +1088,10 @@ void Mutex::TryRemove(PerThreadSynch *s) { ...@@ -1091,10 +1088,10 @@ void Mutex::TryRemove(PerThreadSynch *s) {
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter, mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
std::memory_order_acquire, std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch* h = GetPerThreadSynch(v);
if (h != nullptr) { if (h != nullptr) {
PerThreadSynch *pw = h; // pw is w's predecessor PerThreadSynch* pw = h; // pw is w's predecessor
PerThreadSynch *w; PerThreadSynch* w;
if ((w = pw->next) != s) { // search for thread, if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element do { // processing at least one element
// If the current element isn't equivalent to the waiter to be // If the current element isn't equivalent to the waiter to be
...@@ -1129,8 +1126,7 @@ void Mutex::TryRemove(PerThreadSynch *s) { ...@@ -1129,8 +1126,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
h->readers = 0; // we hold writer lock h->readers = 0; // we hold writer lock
h->maybe_unlocking = false; // finished unlocking h->maybe_unlocking = false; // finished unlocking
} }
} while (!mu_.compare_exchange_weak(v, nv, } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed)); std::memory_order_relaxed));
} }
} }
...@@ -1140,7 +1136,7 @@ void Mutex::TryRemove(PerThreadSynch *s) { ...@@ -1140,7 +1136,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// if the wait extends past the absolute time specified, even if "s" is still // if the wait extends past the absolute time specified, even if "s" is still
// on the mutex queue. In this case, remove "s" from the queue and return // on the mutex queue. In this case, remove "s" from the queue and return
// true, otherwise return false. // true, otherwise return false.
void Mutex::Block(PerThreadSynch *s) { void Mutex::Block(PerThreadSynch* s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) { if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
// After a timeout, we go into a spin loop until we remove ourselves // After a timeout, we go into a spin loop until we remove ourselves
...@@ -1169,8 +1165,8 @@ void Mutex::Block(PerThreadSynch *s) { ...@@ -1169,8 +1165,8 @@ void Mutex::Block(PerThreadSynch *s) {
} }
// Wake thread w, and return the next thread in the list. // Wake thread w, and return the next thread in the list.
PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) { PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
PerThreadSynch *next = w->next; PerThreadSynch* next = w->next;
w->next = nullptr; w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w); IncrementSynchSem(this, w);
...@@ -1178,7 +1174,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) { ...@@ -1178,7 +1174,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
return next; return next;
} }
static GraphId GetGraphIdLocked(Mutex *mu) static GraphId GetGraphIdLocked(Mutex* mu)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) { ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) { // (re)create the deadlock graph. if (!deadlock_graph) { // (re)create the deadlock graph.
deadlock_graph = deadlock_graph =
...@@ -1188,7 +1184,7 @@ static GraphId GetGraphIdLocked(Mutex *mu) ...@@ -1188,7 +1184,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
return deadlock_graph->GetId(mu); return deadlock_graph->GetId(mu);
} }
static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock(); deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu); GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock(); deadlock_graph_mu.Unlock();
...@@ -1198,7 +1194,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { ...@@ -1198,7 +1194,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
// Record a lock acquisition. This is used in debug mode for deadlock // Record a lock acquisition. This is used in debug mode for deadlock
// detection. The held_locks pointer points to the relevant data // detection. The held_locks pointer points to the relevant data
// structure for each case. // structure for each case.
static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n; int n = held_locks->n;
int i = 0; int i = 0;
while (i != n && held_locks->locks[i].id != id) { while (i != n && held_locks->locks[i].id != id) {
...@@ -1222,7 +1218,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { ...@@ -1222,7 +1218,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
// eventually followed by a call to LockLeave(mu, id, x) by the same thread. // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
// It does not process the event if is not needed when deadlock detection is // It does not process the event if is not needed when deadlock detection is
// disabled. // disabled.
static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n; int n = held_locks->n;
int i = 0; int i = 0;
while (i != n && held_locks->locks[i].id != id) { while (i != n && held_locks->locks[i].id != id) {
...@@ -1237,11 +1233,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { ...@@ -1237,11 +1233,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
i++; i++;
} }
if (i == n) { // mu missing means releasing unheld lock if (i == n) { // mu missing means releasing unheld lock
SynchEvent *mu_events = GetSynchEvent(mu); SynchEvent* mu_events = GetSynchEvent(mu);
ABSL_RAW_LOG(FATAL, ABSL_RAW_LOG(FATAL,
"thread releasing lock it does not hold: %p %s; " "thread releasing lock it does not hold: %p %s; "
, ,
static_cast<void *>(mu), static_cast<void*>(mu),
mu_events == nullptr ? "" : mu_events->name); mu_events == nullptr ? "" : mu_events->name);
} }
} }
...@@ -1258,7 +1254,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { ...@@ -1258,7 +1254,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
} }
// Call LockEnter() if in debug mode and deadlock detection is enabled. // Call LockEnter() if in debug mode and deadlock detection is enabled.
static inline void DebugOnlyLockEnter(Mutex *mu) { static inline void DebugOnlyLockEnter(Mutex* mu) {
if (kDebugMode) { if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) != if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
...@@ -1268,7 +1264,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) { ...@@ -1268,7 +1264,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) {
} }
// Call LockEnter() if in debug mode and deadlock detection is enabled. // Call LockEnter() if in debug mode and deadlock detection is enabled.
static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) { static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
if (kDebugMode) { if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) != if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
...@@ -1278,7 +1274,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) { ...@@ -1278,7 +1274,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
} }
// Call LockLeave() if in debug mode and deadlock detection is enabled. // Call LockLeave() if in debug mode and deadlock detection is enabled.
static inline void DebugOnlyLockLeave(Mutex *mu) { static inline void DebugOnlyLockLeave(Mutex* mu) {
if (kDebugMode) { if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) != if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
...@@ -1287,7 +1283,7 @@ static inline void DebugOnlyLockLeave(Mutex *mu) { ...@@ -1287,7 +1283,7 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
} }
} }
static char *StackString(void **pcs, int n, char *buf, int maxlen, static char* StackString(void** pcs, int n, char* buf, int maxlen,
bool symbolize) { bool symbolize) {
static constexpr int kSymLen = 200; static constexpr int kSymLen = 200;
char sym[kSymLen]; char sym[kSymLen];
...@@ -1310,14 +1306,16 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen, ...@@ -1310,14 +1306,16 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf; return buf;
} }
static char *CurrentStackString(char *buf, int maxlen, bool symbolize) { static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
void *pcs[40]; void* pcs[40];
return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf, return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
maxlen, symbolize); maxlen, symbolize);
} }
namespace { namespace {
enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle; enum {
kMaxDeadlockPathLen = 10
}; // maximum length of a deadlock cycle;
// a path this long would be remarkable // a path this long would be remarkable
// Buffers required to report a deadlock. // Buffers required to report a deadlock.
// We do not allocate them on stack to avoid large stack frame. // We do not allocate them on stack to avoid large stack frame.
...@@ -1328,11 +1326,11 @@ struct DeadlockReportBuffers { ...@@ -1328,11 +1326,11 @@ struct DeadlockReportBuffers {
struct ScopedDeadlockReportBuffers { struct ScopedDeadlockReportBuffers {
ScopedDeadlockReportBuffers() { ScopedDeadlockReportBuffers() {
b = reinterpret_cast<DeadlockReportBuffers *>( b = reinterpret_cast<DeadlockReportBuffers*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*b))); base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
} }
~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); } ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
DeadlockReportBuffers *b; DeadlockReportBuffers* b;
}; };
// Helper to pass to GraphCycles::UpdateStackTrace. // Helper to pass to GraphCycles::UpdateStackTrace.
...@@ -1343,13 +1341,13 @@ int GetStack(void** stack, int max_depth) { ...@@ -1343,13 +1341,13 @@ int GetStack(void** stack, int max_depth) {
// Called in debug mode when a thread is about to acquire a lock in a way that // Called in debug mode when a thread is about to acquire a lock in a way that
// may block. // may block.
static GraphId DeadlockCheck(Mutex *mu) { static GraphId DeadlockCheck(Mutex* mu) {
if (synch_deadlock_detection.load(std::memory_order_acquire) == if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
return InvalidGraphId(); return InvalidGraphId();
} }
SynchLocksHeld *all_locks = Synch_GetAllLocks(); SynchLocksHeld* all_locks = Synch_GetAllLocks();
absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu); absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
const GraphId mu_id = GetGraphIdLocked(mu); const GraphId mu_id = GetGraphIdLocked(mu);
...@@ -1371,8 +1369,8 @@ static GraphId DeadlockCheck(Mutex *mu) { ...@@ -1371,8 +1369,8 @@ static GraphId DeadlockCheck(Mutex *mu) {
// For each other mutex already held by this thread: // For each other mutex already held by this thread:
for (int i = 0; i != all_locks->n; i++) { for (int i = 0; i != all_locks->n; i++) {
const GraphId other_node_id = all_locks->locks[i].id; const GraphId other_node_id = all_locks->locks[i].id;
const Mutex *other = const Mutex* other =
static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id)); static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
if (other == nullptr) { if (other == nullptr) {
// Ignore stale lock // Ignore stale lock
continue; continue;
...@@ -1381,7 +1379,7 @@ static GraphId DeadlockCheck(Mutex *mu) { ...@@ -1381,7 +1379,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Add the acquired-before edge to the graph. // Add the acquired-before edge to the graph.
if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) { if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
ScopedDeadlockReportBuffers scoped_buffers; ScopedDeadlockReportBuffers scoped_buffers;
DeadlockReportBuffers *b = scoped_buffers.b; DeadlockReportBuffers* b = scoped_buffers.b;
static int number_of_reported_deadlocks = 0; static int number_of_reported_deadlocks = 0;
number_of_reported_deadlocks++; number_of_reported_deadlocks++;
// Symbolize only 2 first deadlock report to avoid huge slowdowns. // Symbolize only 2 first deadlock report to avoid huge slowdowns.
...@@ -1392,25 +1390,25 @@ static GraphId DeadlockCheck(Mutex *mu) { ...@@ -1392,25 +1390,25 @@ static GraphId DeadlockCheck(Mutex *mu) {
for (int j = 0; j != all_locks->n; j++) { for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) { if (pr != nullptr) {
snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
len += strlen(&b->buf[len]); len += strlen(&b->buf[len]);
} }
} }
ABSL_RAW_LOG(ERROR, ABSL_RAW_LOG(ERROR,
"Acquiring absl::Mutex %p while holding %s; a cycle in the " "Acquiring absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed", "historical lock ordering graph has been observed",
static_cast<void *>(mu), b->buf); static_cast<void*>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: "); ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath( int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path); ABSL_ARRAYSIZE(b->path), b->path);
for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) { for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j]; GraphId id = b->path[j];
Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id)); Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue; if (path_mu == nullptr) continue;
void** stack; void** stack;
int depth = deadlock_graph->GetStackTrace(id, &stack); int depth = deadlock_graph->GetStackTrace(id, &stack);
snprintf(b->buf, sizeof(b->buf), snprintf(b->buf, sizeof(b->buf),
"mutex@%p stack: ", static_cast<void *>(path_mu)); "mutex@%p stack: ", static_cast<void*>(path_mu));
StackString(stack, depth, b->buf + strlen(b->buf), StackString(stack, depth, b->buf + strlen(b->buf),
static_cast<int>(sizeof(b->buf) - strlen(b->buf)), static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
symbolize); symbolize);
...@@ -1434,7 +1432,7 @@ static GraphId DeadlockCheck(Mutex *mu) { ...@@ -1434,7 +1432,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Invoke DeadlockCheck() iff we're in debug mode and // Invoke DeadlockCheck() iff we're in debug mode and
// deadlock checking has been enabled. // deadlock checking has been enabled.
static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) { static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) != if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
return DeadlockCheck(mu); return DeadlockCheck(mu);
...@@ -1461,13 +1459,13 @@ void Mutex::AssertNotHeld() const { ...@@ -1461,13 +1459,13 @@ void Mutex::AssertNotHeld() const {
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 && (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) != synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) { OnDeadlockCycle::kIgnore) {
GraphId id = GetGraphId(const_cast<Mutex *>(this)); GraphId id = GetGraphId(const_cast<Mutex*>(this));
SynchLocksHeld *locks = Synch_GetAllLocks(); SynchLocksHeld* locks = Synch_GetAllLocks();
for (int i = 0; i != locks->n; i++) { for (int i = 0; i != locks->n; i++) {
if (locks->locks[i].id == id) { if (locks->locks[i].id == id) {
SynchEvent *mu_events = GetSynchEvent(this); SynchEvent* mu_events = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s", ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
static_cast<const void *>(this), static_cast<const void*>(this),
(mu_events == nullptr ? "" : mu_events->name)); (mu_events == nullptr ? "" : mu_events->name));
} }
} }
...@@ -1480,7 +1478,7 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) { ...@@ -1480,7 +1478,7 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = GetMutexGlobals().spinloop_iterations; int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed); intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader|kMuEvent)) != 0) { if ((v & (kMuReader | kMuEvent)) != 0) {
return false; // a reader or tracing -> give up return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v, mu->compare_exchange_strong(v, kMuWriter | v,
...@@ -1498,8 +1496,7 @@ void Mutex::Lock() { ...@@ -1498,8 +1496,7 @@ void Mutex::Lock() {
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then spin loop // try fast acquire, then spin loop
if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 || if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
!mu_.compare_exchange_strong(v, kMuWriter | v, !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
// try spin acquire, then slow loop // try spin acquire, then slow loop
if (!TryAcquireWithSpinning(&this->mu_)) { if (!TryAcquireWithSpinning(&this->mu_)) {
...@@ -1525,7 +1522,7 @@ void Mutex::ReaderLock() { ...@@ -1525,7 +1522,7 @@ void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
} }
void Mutex::LockWhen(const Condition &cond) { void Mutex::LockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kExclusive, &cond, 0); this->LockSlow(kExclusive, &cond, 0);
...@@ -1533,27 +1530,26 @@ void Mutex::LockWhen(const Condition &cond) { ...@@ -1533,27 +1530,26 @@ void Mutex::LockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
} }
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) { bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
bool res = LockSlowWithDeadline(kExclusive, &cond, bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
KernelTimeout(timeout), 0);
DebugOnlyLockEnter(this, id); DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
return res; return res;
} }
bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) { bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
bool res = LockSlowWithDeadline(kExclusive, &cond, bool res =
KernelTimeout(deadline), 0); LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
DebugOnlyLockEnter(this, id); DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
return res; return res;
} }
void Mutex::ReaderLockWhen(const Condition &cond) { void Mutex::ReaderLockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kShared, &cond, 0); this->LockSlow(kShared, &cond, 0);
...@@ -1561,7 +1557,7 @@ void Mutex::ReaderLockWhen(const Condition &cond) { ...@@ -1561,7 +1557,7 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
} }
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond, bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) { absl::Duration timeout) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
...@@ -1571,7 +1567,7 @@ bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond, ...@@ -1571,7 +1567,7 @@ bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
return res; return res;
} }
bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond, bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
absl::Time deadline) { absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
...@@ -1581,7 +1577,7 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond, ...@@ -1581,7 +1577,7 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
return res; return res;
} }
void Mutex::Await(const Condition &cond) { void Mutex::Await(const Condition& cond) {
if (cond.Eval()) { // condition already true; nothing to do if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) { if (kDebugMode) {
this->AssertReaderHeld(); this->AssertReaderHeld();
...@@ -1592,7 +1588,7 @@ void Mutex::Await(const Condition &cond) { ...@@ -1592,7 +1588,7 @@ void Mutex::Await(const Condition &cond) {
} }
} }
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) { bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
if (cond.Eval()) { // condition already true; nothing to do if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) { if (kDebugMode) {
this->AssertReaderHeld(); this->AssertReaderHeld();
...@@ -1607,7 +1603,7 @@ bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) { ...@@ -1607,7 +1603,7 @@ bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
return res; return res;
} }
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) { bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
if (cond.Eval()) { // condition already true; nothing to do if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) { if (kDebugMode) {
this->AssertReaderHeld(); this->AssertReaderHeld();
...@@ -1622,13 +1618,13 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) { ...@@ -1622,13 +1618,13 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
return res; return res;
} }
bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) { bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
this->AssertReaderHeld(); this->AssertReaderHeld();
MuHow how = MuHow how =
(mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared; (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how)); ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
SynchWaitParams waitp( SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this), Synch_GetPerThreadAnnotated(this),
nullptr /*no cv_word*/); nullptr /*no cv_word*/);
int flags = kMuHasBlocked; int flags = kMuHasBlocked;
if (!Condition::GuaranteedEqual(&cond, nullptr)) { if (!Condition::GuaranteedEqual(&cond, nullptr)) {
...@@ -1649,8 +1645,7 @@ bool Mutex::TryLock() { ...@@ -1649,8 +1645,7 @@ bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
mu_.compare_exchange_strong(v, kMuWriter | v, mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
DebugOnlyLockEnter(this); DebugOnlyLockEnter(this);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
...@@ -1682,7 +1677,7 @@ bool Mutex::ReaderTryLock() { ...@@ -1682,7 +1677,7 @@ bool Mutex::ReaderTryLock() {
// changing (typically because the reader count changes) under the CAS. We // changing (typically because the reader count changes) under the CAS. We
// limit the number of attempts to avoid having to think about livelock. // limit the number of attempts to avoid having to think about livelock.
int loop_limit = 5; int loop_limit = 5;
while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) { while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne, if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
std::memory_order_acquire, std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
...@@ -1750,8 +1745,7 @@ void Mutex::Unlock() { ...@@ -1750,8 +1745,7 @@ void Mutex::Unlock() {
static_cast<long long>(v), static_cast<long long>(x), static_cast<long long>(v), static_cast<long long>(x),
static_cast<long long>(y)); static_cast<long long>(y));
} }
if (x < y && if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
std::memory_order_release, std::memory_order_release,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
// fast writer release (writer with no waiters or with designated waker) // fast writer release (writer with no waiters or with designated waker)
...@@ -1763,7 +1757,7 @@ void Mutex::Unlock() { ...@@ -1763,7 +1757,7 @@ void Mutex::Unlock() {
// Requires v to represent a reader-locked state. // Requires v to represent a reader-locked state.
static bool ExactlyOneReader(intptr_t v) { static bool ExactlyOneReader(intptr_t v) {
assert((v & (kMuWriter|kMuReader)) == kMuReader); assert((v & (kMuWriter | kMuReader)) == kMuReader);
assert((v & kMuHigh) != 0); assert((v & kMuHigh) != 0);
// The more straightforward "(v & kMuHigh) == kMuOne" also works, but // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
// on some architectures the following generates slightly smaller code. // on some architectures the following generates slightly smaller code.
...@@ -1776,12 +1770,11 @@ void Mutex::ReaderUnlock() { ...@@ -1776,12 +1770,11 @@ void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this); DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
assert((v & (kMuWriter|kMuReader)) == kMuReader); assert((v & (kMuWriter | kMuReader)) == kMuReader);
if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) { if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
// fast reader release (reader with no waiters) // fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne; intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
if (mu_.compare_exchange_strong(v, v - clear, if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
return; return;
...@@ -1820,7 +1813,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) { ...@@ -1820,7 +1813,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
} }
// Internal version of LockWhen(). See LockSlowWithDeadline() // Internal version of LockWhen(). See LockSlowWithDeadline()
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond, ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
int flags) { int flags) {
ABSL_RAW_CHECK( ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags), this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
...@@ -1828,7 +1821,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond, ...@@ -1828,7 +1821,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
} }
// Compute cond->Eval() and tell race detectors that we do it under mutex mu. // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock, bool locking, bool trylock,
bool read_lock) { bool read_lock) {
// Delicate annotation dance. // Delicate annotation dance.
...@@ -1878,7 +1871,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, ...@@ -1878,7 +1871,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// tsan). As the result there is no tsan-visible synchronization between the // tsan). As the result there is no tsan-visible synchronization between the
// addition and this thread. So if we would enable race detection here, // addition and this thread. So if we would enable race detection here,
// it would race with the predicate initialization. // it would race with the predicate initialization.
static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) { static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
// Memory accesses are already ignored inside of lock/unlock operations, // Memory accesses are already ignored inside of lock/unlock operations,
// but synchronization operations are also ignored. When we evaluate the // but synchronization operations are also ignored. When we evaluate the
// predicate we must ignore only memory accesses but not synchronization, // predicate we must ignore only memory accesses but not synchronization,
...@@ -1903,7 +1896,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) { ...@@ -1903,7 +1896,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// obstruct this call // obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable, // - kMuIsCond indicates that this is a conditional acquire (condition variable,
// Await, LockWhen) so contention profiling should be suppressed. // Await, LockWhen) so contention profiling should be suppressed.
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond, bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
KernelTimeout t, int flags) { KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
bool unlock = false; bool unlock = false;
...@@ -1920,8 +1913,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond, ...@@ -1920,8 +1913,8 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
} }
unlock = true; unlock = true;
} }
SynchWaitParams waitp( SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this), Synch_GetPerThreadAnnotated(this),
nullptr /*no cv_word*/); nullptr /*no cv_word*/);
if (!Condition::GuaranteedEqual(cond, nullptr)) { if (!Condition::GuaranteedEqual(cond, nullptr)) {
flags |= kMuIsCond; flags |= kMuIsCond;
...@@ -1963,20 +1956,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) { ...@@ -1963,20 +1956,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return; if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader), RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
"%s: Mutex corrupt: both reader and writer lock held: %p", "%s: Mutex corrupt: both reader and writer lock held: %p",
label, reinterpret_cast<void *>(v)); label, reinterpret_cast<void*>(v));
RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait, RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
"%s: Mutex corrupt: waiting writer with no waiters: %p", "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
label, reinterpret_cast<void *>(v)); reinterpret_cast<void*>(v));
assert(false); assert(false);
} }
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling; SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0; int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) { if ((v & kMuEvent) != 0) {
PostSynchEvent(this, PostSynchEvent(
waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK); this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
} }
ABSL_RAW_CHECK( ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
...@@ -2003,9 +1996,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2003,9 +1996,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
} }
} else { // need to access waiter list } else { // need to access waiter list
bool dowait = false; bool dowait = false;
if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter. // This thread tries to become the one and only waiter.
PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags); PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
intptr_t nv = intptr_t nv =
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) | (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
kMuWait; kMuWait;
...@@ -2030,7 +2023,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2030,7 +2023,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuReader, kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) { std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch* h = GetPerThreadSynch(v);
h->readers += kMuOne; // inc reader count in waiter h->readers += kMuOne; // inc reader count in waiter
do { // release spinlock do { // release spinlock
v = mu_.load(std::memory_order_relaxed); v = mu_.load(std::memory_order_relaxed);
...@@ -2053,8 +2046,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2053,8 +2046,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuWait, kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) { std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch* h = GetPerThreadSynch(v);
PerThreadSynch *new_h = Enqueue(h, waitp, v, flags); PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
intptr_t wr_wait = 0; intptr_t wr_wait = 0;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed"); ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) { if (waitp->how == kExclusive && (v & kMuReader) != 0) {
...@@ -2063,7 +2056,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2063,7 +2056,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
do { // release spinlock do { // release spinlock
v = mu_.load(std::memory_order_relaxed); v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak( } while (!mu_.compare_exchange_weak(
v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait | v,
(v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
reinterpret_cast<intptr_t>(new_h), reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed)); std::memory_order_release, std::memory_order_relaxed));
dowait = true; dowait = true;
...@@ -2084,9 +2078,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2084,9 +2078,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code"); "detected illegal recursion into Mutex code");
if ((v & kMuEvent) != 0) { if ((v & kMuEvent) != 0) {
PostSynchEvent(this, PostSynchEvent(this, waitp->how == kExclusive
waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING : ? SYNCH_EV_LOCK_RETURNING
SYNCH_EV_READERLOCK_RETURNING); : SYNCH_EV_READERLOCK_RETURNING);
} }
} }
...@@ -2095,25 +2089,25 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { ...@@ -2095,25 +2089,25 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false // which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue // or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true. // itself on the mutex/condvar to wait for its condition to become true.
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling; SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld(); this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock"); CheckForMutexCorruption(v, "Unlock");
if ((v & kMuEvent) != 0) { if ((v & kMuEvent) != 0) {
PostSynchEvent(this, PostSynchEvent(
(v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK); this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
} }
int c = 0; int c = 0;
// the waiter under consideration to wake, or zero // the waiter under consideration to wake, or zero
PerThreadSynch *w = nullptr; PerThreadSynch* w = nullptr;
// the predecessor to w or zero // the predecessor to w or zero
PerThreadSynch *pw = nullptr; PerThreadSynch* pw = nullptr;
// head of the list searched previously, or zero // head of the list searched previously, or zero
PerThreadSynch *old_h = nullptr; PerThreadSynch* old_h = nullptr;
// a condition that's known to be false. // a condition that's known to be false.
const Condition *known_false = nullptr; const Condition* known_false = nullptr;
PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
// later writer could have acquired the lock // later writer could have acquired the lock
// (starvation avoidance) // (starvation avoidance)
...@@ -2136,8 +2130,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ...@@ -2136,8 +2130,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) { } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
// fast reader release (reader with no waiters) // fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne; intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
if (mu_.compare_exchange_strong(v, v - clear, if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
return; return;
} }
...@@ -2153,8 +2146,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ...@@ -2153,8 +2146,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
do { // must loop to release spinlock as reader count may change do { // must loop to release spinlock as reader count may change
v = mu_.load(std::memory_order_relaxed); v = mu_.load(std::memory_order_relaxed);
// decrement reader count if there are readers // decrement reader count if there are readers
intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v; intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
PerThreadSynch *new_h = nullptr; PerThreadSynch* new_h = nullptr;
if (do_enqueue) { if (do_enqueue) {
// If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
// we must not retry here. The initial attempt will always have // we must not retry here. The initial attempt will always have
...@@ -2178,21 +2171,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ...@@ -2178,21 +2171,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} }
// release spinlock & our lock; retry if reader-count changed // release spinlock & our lock; retry if reader-count changed
// (writer count cannot change since we hold lock) // (writer count cannot change since we hold lock)
} while (!mu_.compare_exchange_weak(v, nv, } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_release,
std::memory_order_relaxed)); std::memory_order_relaxed));
break; break;
} }
// There are waiters. // There are waiters.
// Set h to the head of the circular waiter list. // Set h to the head of the circular waiter list.
PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch* h = GetPerThreadSynch(v);
if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) { if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
// a reader but not the last // a reader but not the last
h->readers -= kMuOne; // release our lock h->readers -= kMuOne; // release our lock
intptr_t nv = v; // normally just release spinlock intptr_t nv = v; // normally just release spinlock
if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond); PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr, ABSL_RAW_CHECK(new_h != nullptr,
"waiters disappeared during Enqueue()!"); "waiters disappeared during Enqueue()!");
nv &= kMuLow; nv &= kMuLow;
...@@ -2245,11 +2237,11 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ...@@ -2245,11 +2237,11 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// old_h if it's set. If old_h==h, there's no one to wakeup at all. // old_h if it's set. If old_h==h, there's no one to wakeup at all.
if (old_h == h) { // we've searched before, and nothing's new if (old_h == h) { // we've searched before, and nothing's new
// so there's no one to wake. // so there's no one to wake.
intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait)); intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
h->readers = 0; h->readers = 0;
h->maybe_unlocking = false; // finished unlocking h->maybe_unlocking = false; // finished unlocking
if (waitp != nullptr) { // we must queue ourselves and sleep if (waitp != nullptr) { // we must queue ourselves and sleep
PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond); PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
nv &= kMuLow; nv &= kMuLow;
if (new_h != nullptr) { if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h); nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
...@@ -2263,8 +2255,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { ...@@ -2263,8 +2255,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} }
// set up to walk the list // set up to walk the list
PerThreadSynch *w_walk; // current waiter during list walk PerThreadSynch* w_walk; // current waiter during list walk
PerThreadSynch *pw_walk; // previous waiter during list walk PerThreadSynch* pw_walk; // previous waiter during list walk
if (old_h != nullptr) { // we've searched up to old_h before if (old_h != nullptr) { // we've searched up to old_h before
pw_walk = old_h; pw_walk = old_h;
w_walk = old_h->next; w_walk = old_h->next;
...@@ -2420,7 +2412,7 @@ void Mutex::Trans(MuHow how) { ...@@ -2420,7 +2412,7 @@ void Mutex::Trans(MuHow how) {
// condition variable. If this mutex is free, we simply wake the thread. // condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we // It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex. // enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) { void Mutex::Fer(PerThreadSynch* w) {
SchedulingGuard::ScopedDisable disable_rescheduling; SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0; int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr, ABSL_RAW_CHECK(w->waitp->cond == nullptr,
...@@ -2445,9 +2437,9 @@ void Mutex::Fer(PerThreadSynch *w) { ...@@ -2445,9 +2437,9 @@ void Mutex::Fer(PerThreadSynch *w) {
IncrementSynchSem(this, w); IncrementSynchSem(this, w);
return; return;
} else { } else {
if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter. // This thread tries to become the one and only waiter.
PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond); PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr, ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves "Enqueue failed"); // we must queue ourselves
if (mu_.compare_exchange_strong( if (mu_.compare_exchange_strong(
...@@ -2457,8 +2449,8 @@ void Mutex::Fer(PerThreadSynch *w) { ...@@ -2457,8 +2449,8 @@ void Mutex::Fer(PerThreadSynch *w) {
} }
} else if ((v & kMuSpin) == 0 && } else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) { mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
PerThreadSynch *h = GetPerThreadSynch(v); PerThreadSynch* h = GetPerThreadSynch(v);
PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond); PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr, ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves "Enqueue failed"); // we must queue ourselves
do { do {
...@@ -2477,19 +2469,18 @@ void Mutex::Fer(PerThreadSynch *w) { ...@@ -2477,19 +2469,18 @@ void Mutex::Fer(PerThreadSynch *w) {
void Mutex::AssertHeld() const { void Mutex::AssertHeld() const {
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) { if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
SynchEvent *e = GetSynchEvent(this); SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s", ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
static_cast<const void *>(this), static_cast<const void*>(this), (e == nullptr ? "" : e->name));
(e == nullptr ? "" : e->name));
} }
} }
void Mutex::AssertReaderHeld() const { void Mutex::AssertReaderHeld() const {
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) { if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
SynchEvent *e = GetSynchEvent(this); SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG( ABSL_RAW_LOG(FATAL,
FATAL, "thread should hold at least a read lock on Mutex %p %s", "thread should hold at least a read lock on Mutex %p %s",
static_cast<const void *>(this), (e == nullptr ? "" : e->name)); static_cast<const void*>(this), (e == nullptr ? "" : e->name));
} }
} }
...@@ -2500,13 +2491,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events ...@@ -2500,13 +2491,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events
static const intptr_t kCvLow = 0x0003L; // low order bits of CV static const intptr_t kCvLow = 0x0003L; // low order bits of CV
// Hack to make constant values available to gdb pretty printer // Hack to make constant values available to gdb pretty printer
enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, }; enum {
kGdbCvSpin = kCvSpin,
kGdbCvEvent = kCvEvent,
kGdbCvLow = kCvLow,
};
static_assert(PerThreadSynch::kAlignment > kCvLow, static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow"); "PerThreadSynch::kAlignment must be greater than kCvLow");
void CondVar::EnableDebugLog(const char *name) { void CondVar::EnableDebugLog(const char* name) {
SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin); SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true; e->log = true;
UnrefSynchEvent(e); UnrefSynchEvent(e);
} }
...@@ -2517,21 +2512,19 @@ CondVar::~CondVar() { ...@@ -2517,21 +2512,19 @@ CondVar::~CondVar() {
} }
} }
// Remove thread s from the list of waiters on this condition variable. // Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) { void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling; SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v; intptr_t v;
int c = 0; int c = 0;
for (v = cv_.load(std::memory_order_relaxed);; for (v = cv_.load(std::memory_order_relaxed);;
v = cv_.load(std::memory_order_relaxed)) { v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
cv_.compare_exchange_strong(v, v | kCvSpin, cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) { if (h != nullptr) {
PerThreadSynch *w = h; PerThreadSynch* w = h;
while (w->next != s && w->next != h) { // search for thread while (w->next != s && w->next != h) { // search for thread
w = w->next; w = w->next;
} }
...@@ -2567,14 +2560,14 @@ void CondVar::Remove(PerThreadSynch *s) { ...@@ -2567,14 +2560,14 @@ void CondVar::Remove(PerThreadSynch *s) {
// variable queue just before the mutex is to be unlocked, and (most // variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the // importantly) after any call to an external routine that might re-enter the
// mutex code. // mutex code.
static void CondVarEnqueue(SynchWaitParams *waitp) { static void CondVarEnqueue(SynchWaitParams* waitp) {
// This thread might be transferred to the Mutex queue by Fer() when // This thread might be transferred to the Mutex queue by Fer() when
// we are woken. To make sure that is what happens, Enqueue() doesn't // we are woken. To make sure that is what happens, Enqueue() doesn't
// call CondVarEnqueue() again but instead uses its normal code. We // call CondVarEnqueue() again but instead uses its normal code. We
// must do this before we queue ourselves so that cv_word will be null // must do this before we queue ourselves so that cv_word will be null
// when seen by the dequeuer, who may wish immediately to requeue // when seen by the dequeuer, who may wish immediately to requeue
// this thread on another queue. // this thread on another queue.
std::atomic<intptr_t> *cv_word = waitp->cv_word; std::atomic<intptr_t>* cv_word = waitp->cv_word;
waitp->cv_word = nullptr; waitp->cv_word = nullptr;
intptr_t v = cv_word->load(std::memory_order_relaxed); intptr_t v = cv_word->load(std::memory_order_relaxed);
...@@ -2588,7 +2581,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) { ...@@ -2588,7 +2581,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
} }
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be"); ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
waitp->thread->waitp = waitp; // prepare ourselves for waiting waitp->thread->waitp = waitp; // prepare ourselves for waiting
PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h == nullptr) { // add this thread to waiter list if (h == nullptr) { // add this thread to waiter list
waitp->thread->next = waitp->thread; waitp->thread->next = waitp->thread;
} else { } else {
...@@ -2601,7 +2594,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) { ...@@ -2601,7 +2594,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
std::memory_order_release); std::memory_order_release);
} }
bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) { bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
bool rc = false; // return value; true iff we timed-out bool rc = false; // return value; true iff we timed-out
intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed); intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
...@@ -2669,27 +2662,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) { ...@@ -2669,27 +2662,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
return rc; return rc;
} }
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) { bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
return WaitCommon(mu, KernelTimeout(timeout)); return WaitCommon(mu, KernelTimeout(timeout));
} }
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) { bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
return WaitCommon(mu, KernelTimeout(deadline)); return WaitCommon(mu, KernelTimeout(deadline));
} }
void CondVar::Wait(Mutex *mu) { void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
WaitCommon(mu, KernelTimeout::Never());
}
// Wake thread w // Wake thread w
// If it was a timed wait, w will be waiting on w->cv // If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer(). // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
void CondVar::Wakeup(PerThreadSynch *w) { void CondVar::Wakeup(PerThreadSynch* w) {
if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) { if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
// The waiting thread only needs to observe "w->state == kAvailable" to be // The waiting thread only needs to observe "w->state == kAvailable" to be
// released, we must cache "cvmu" before clearing "next". // released, we must cache "cvmu" before clearing "next".
Mutex *mu = w->waitp->cvmu; Mutex* mu = w->waitp->cvmu;
w->next = nullptr; w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
Mutex::IncrementSynchSem(mu, w); Mutex::IncrementSynchSem(mu, w);
...@@ -2706,11 +2697,10 @@ void CondVar::Signal() { ...@@ -2706,11 +2697,10 @@ void CondVar::Signal() {
for (v = cv_.load(std::memory_order_relaxed); v != 0; for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) { v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
cv_.compare_exchange_strong(v, v | kCvSpin, cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
PerThreadSynch *w = nullptr; PerThreadSynch* w = nullptr;
if (h != nullptr) { // remove first waiter if (h != nullptr) { // remove first waiter
w = h->next; w = h->next;
if (w == h) { if (w == h) {
...@@ -2738,7 +2728,7 @@ void CondVar::Signal() { ...@@ -2738,7 +2728,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
} }
void CondVar::SignalAll () { void CondVar::SignalAll() {
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0); ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v; intptr_t v;
int c = 0; int c = 0;
...@@ -2752,10 +2742,10 @@ void CondVar::SignalAll () { ...@@ -2752,10 +2742,10 @@ void CondVar::SignalAll () {
if ((v & kCvSpin) == 0 && if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire, cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
std::memory_order_relaxed)) { std::memory_order_relaxed)) {
PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) { if (h != nullptr) {
PerThreadSynch *w; PerThreadSynch* w;
PerThreadSynch *n = h->next; PerThreadSynch* n = h->next;
do { // for every thread, wake it up do { // for every thread, wake it up
w = n; w = n;
n = n->next; n = n->next;
...@@ -2784,42 +2774,41 @@ void ReleasableMutexLock::Release() { ...@@ -2784,42 +2774,41 @@ void ReleasableMutexLock::Release() {
} }
#ifdef ABSL_HAVE_THREAD_SANITIZER #ifdef ABSL_HAVE_THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr); extern "C" void __tsan_read1(void* addr);
#else #else
#define __tsan_read1(addr) // do nothing if TSan not enabled #define __tsan_read1(addr) // do nothing if TSan not enabled
#endif #endif
// A function that just returns its argument, dereferenced // A function that just returns its argument, dereferenced
static bool Dereference(void *arg) { static bool Dereference(void* arg) {
// ThreadSanitizer does not instrument this file for memory accesses. // ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate // This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access. // in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg); __tsan_read1(arg);
return *(static_cast<bool *>(arg)); return *(static_cast<bool*>(arg));
} }
ABSL_CONST_INIT const Condition Condition::kTrue; ABSL_CONST_INIT const Condition Condition::kTrue;
Condition::Condition(bool (*func)(void *), void *arg) Condition::Condition(bool (*func)(void*), void* arg)
: eval_(&CallVoidPtrFunction), : eval_(&CallVoidPtrFunction), arg_(arg) {
arg_(arg) {
static_assert(sizeof(&func) <= sizeof(callback_), static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer passed to Condition."); "An overlarge function pointer passed to Condition.");
StoreCallback(func); StoreCallback(func);
} }
bool Condition::CallVoidPtrFunction(const Condition *c) { bool Condition::CallVoidPtrFunction(const Condition* c) {
using FunctionPointer = bool (*)(void *); using FunctionPointer = bool (*)(void*);
FunctionPointer function_pointer; FunctionPointer function_pointer;
std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer)); std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
return (*function_pointer)(c->arg_); return (*function_pointer)(c->arg_);
} }
Condition::Condition(const bool *cond) Condition::Condition(const bool* cond)
: eval_(CallVoidPtrFunction), : eval_(CallVoidPtrFunction),
// const_cast is safe since Dereference does not modify arg // const_cast is safe since Dereference does not modify arg
arg_(const_cast<bool *>(cond)) { arg_(const_cast<bool*>(cond)) {
using FunctionPointer = bool (*)(void *); using FunctionPointer = bool (*)(void*);
const FunctionPointer dereference = Dereference; const FunctionPointer dereference = Dereference;
StoreCallback(dereference); StoreCallback(dereference);
} }
...@@ -2829,7 +2818,7 @@ bool Condition::Eval() const { ...@@ -2829,7 +2818,7 @@ bool Condition::Eval() const {
return (this->eval_ == nullptr) || (*this->eval_)(this); return (this->eval_ == nullptr) || (*this->eval_)(this);
} }
bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) { bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
// kTrue logic. // kTrue logic.
if (a == nullptr || a->eval_ == nullptr) { if (a == nullptr || a->eval_ == nullptr) {
return b == nullptr || b->eval_ == nullptr; return b == nullptr || b->eval_ == nullptr;
......
...@@ -141,8 +141,9 @@ struct SynchWaitParams; ...@@ -141,8 +141,9 @@ struct SynchWaitParams;
// issues that could potentially result in race conditions and deadlocks. // issues that could potentially result in race conditions and deadlocks.
// //
// For more information about the lock annotations, please see // For more information about the lock annotations, please see
// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) // [Thread Safety
// in the Clang documentation. // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
// documentation.
// //
// See also `MutexLock`, below, for scoped `Mutex` acquisition. // See also `MutexLock`, below, for scoped `Mutex` acquisition.
...@@ -323,7 +324,7 @@ class ABSL_LOCKABLE Mutex { ...@@ -323,7 +324,7 @@ class ABSL_LOCKABLE Mutex {
// `true`, `Await()` *may* skip the release/re-acquire step. // `true`, `Await()` *may* skip the release/re-acquire step.
// //
// `Await()` requires that this thread holds this `Mutex` in some mode. // `Await()` requires that this thread holds this `Mutex` in some mode.
void Await(const Condition &cond); void Await(const Condition& cond);
// Mutex::LockWhen() // Mutex::LockWhen()
// Mutex::ReaderLockWhen() // Mutex::ReaderLockWhen()
...@@ -333,11 +334,11 @@ class ABSL_LOCKABLE Mutex { ...@@ -333,11 +334,11 @@ class ABSL_LOCKABLE Mutex {
// be acquired, then atomically acquires this `Mutex`. `LockWhen()` is // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
// logically equivalent to `*Lock(); Await();` though they may have different // logically equivalent to `*Lock(); Await();` though they may have different
// performance characteristics. // performance characteristics.
void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(); void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION(); void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION();
void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() { void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
this->LockWhen(cond); this->LockWhen(cond);
} }
...@@ -362,9 +363,9 @@ class ABSL_LOCKABLE Mutex { ...@@ -362,9 +363,9 @@ class ABSL_LOCKABLE Mutex {
// Negative timeouts are equivalent to a zero timeout. // Negative timeouts are equivalent to a zero timeout.
// //
// This method requires that this thread holds this `Mutex` in some mode. // This method requires that this thread holds this `Mutex` in some mode.
bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout); bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout);
bool AwaitWithDeadline(const Condition &cond, absl::Time deadline); bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
// Mutex::LockWhenWithTimeout() // Mutex::LockWhenWithTimeout()
// Mutex::ReaderLockWhenWithTimeout() // Mutex::ReaderLockWhenWithTimeout()
...@@ -377,11 +378,11 @@ class ABSL_LOCKABLE Mutex { ...@@ -377,11 +378,11 @@ class ABSL_LOCKABLE Mutex {
// `true` on return. // `true` on return.
// //
// Negative timeouts are equivalent to a zero timeout. // Negative timeouts are equivalent to a zero timeout.
bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION(); ABSL_EXCLUSIVE_LOCK_FUNCTION();
bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout) bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_SHARED_LOCK_FUNCTION(); ABSL_SHARED_LOCK_FUNCTION();
bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout) bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION() { ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithTimeout(cond, timeout); return this->LockWhenWithTimeout(cond, timeout);
} }
...@@ -397,11 +398,11 @@ class ABSL_LOCKABLE Mutex { ...@@ -397,11 +398,11 @@ class ABSL_LOCKABLE Mutex {
// on return. // on return.
// //
// Deadlines in the past are equivalent to an immediate deadline. // Deadlines in the past are equivalent to an immediate deadline.
bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline) bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION(); ABSL_EXCLUSIVE_LOCK_FUNCTION();
bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline) bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_SHARED_LOCK_FUNCTION(); ABSL_SHARED_LOCK_FUNCTION();
bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline) bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION() { ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithDeadline(cond, deadline); return this->LockWhenWithDeadline(cond, deadline);
} }
...@@ -423,7 +424,7 @@ class ABSL_LOCKABLE Mutex { ...@@ -423,7 +424,7 @@ class ABSL_LOCKABLE Mutex {
// substantially reduce `Mutex` performance; it should be set only for // substantially reduce `Mutex` performance; it should be set only for
// non-production runs. Optimization options may also disable invariant // non-production runs. Optimization options may also disable invariant
// checks. // checks.
void EnableInvariantDebugging(void (*invariant)(void *), void *arg); void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
// Mutex::EnableDebugLog() // Mutex::EnableDebugLog()
// //
...@@ -432,7 +433,7 @@ class ABSL_LOCKABLE Mutex { ...@@ -432,7 +433,7 @@ class ABSL_LOCKABLE Mutex {
// call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made. // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
// //
// Note: This method substantially reduces `Mutex` performance. // Note: This method substantially reduces `Mutex` performance.
void EnableDebugLog(const char *name); void EnableDebugLog(const char* name);
// Deadlock detection // Deadlock detection
...@@ -460,7 +461,7 @@ class ABSL_LOCKABLE Mutex { ...@@ -460,7 +461,7 @@ class ABSL_LOCKABLE Mutex {
// A `MuHow` is a constant that indicates how a lock should be acquired. // A `MuHow` is a constant that indicates how a lock should be acquired.
// Internal implementation detail. Clients should ignore. // Internal implementation detail. Clients should ignore.
typedef const struct MuHowS *MuHow; typedef const struct MuHowS* MuHow;
// Mutex::InternalAttemptToUseMutexInFatalSignalHandler() // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
// //
...@@ -482,37 +483,37 @@ class ABSL_LOCKABLE Mutex { ...@@ -482,37 +483,37 @@ class ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required // Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem. // friendship with PerThreadSem.
static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w); static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w, static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
synchronization_internal::KernelTimeout t); synchronization_internal::KernelTimeout t);
// slow path acquire // slow path acquire
void LockSlowLoop(SynchWaitParams *waitp, int flags); void LockSlowLoop(SynchWaitParams* waitp, int flags);
// wrappers around LockSlowLoop() // wrappers around LockSlowLoop()
bool LockSlowWithDeadline(MuHow how, const Condition *cond, bool LockSlowWithDeadline(MuHow how, const Condition* cond,
synchronization_internal::KernelTimeout t, synchronization_internal::KernelTimeout t,
int flags); int flags);
void LockSlow(MuHow how, const Condition *cond, void LockSlow(MuHow how, const Condition* cond,
int flags) ABSL_ATTRIBUTE_COLD; int flags) ABSL_ATTRIBUTE_COLD;
// slow path release // slow path release
void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD; void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
// Common code between Await() and AwaitWithTimeout/Deadline() // Common code between Await() and AwaitWithTimeout/Deadline()
bool AwaitCommon(const Condition &cond, bool AwaitCommon(const Condition& cond,
synchronization_internal::KernelTimeout t); synchronization_internal::KernelTimeout t);
// Attempt to remove thread s from queue. // Attempt to remove thread s from queue.
void TryRemove(base_internal::PerThreadSynch *s); void TryRemove(base_internal::PerThreadSynch* s);
// Block a thread on mutex. // Block a thread on mutex.
void Block(base_internal::PerThreadSynch *s); void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor. // Wake a thread; return successor.
base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w); base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
friend class CondVar; // for access to Trans()/Fer(). friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer( void Fer(
base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
// Catch the error of writing Mutex when intending MutexLock. // Catch the error of writing Mutex when intending MutexLock.
Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit) Mutex(const volatile Mutex* /*ignored*/) {} // NOLINT(runtime/explicit)
Mutex(const Mutex&) = delete; Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete; Mutex& operator=(const Mutex&) = delete;
...@@ -547,20 +548,20 @@ class ABSL_SCOPED_LOCKABLE MutexLock { ...@@ -547,20 +548,20 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that // guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable. // `mu` be dereferenceable.
explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock(); this->mu_->Lock();
} }
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when // the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed. // this object is constructed.
explicit MutexLock(Mutex *mu, const Condition &cond) explicit MutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
this->mu_->LockWhen(cond); this->mu_->LockWhen(cond);
} }
MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex) MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex) MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete; MutexLock& operator=(const MutexLock&) = delete;
MutexLock& operator=(MutexLock&&) = delete; MutexLock& operator=(MutexLock&&) = delete;
...@@ -568,7 +569,7 @@ class ABSL_SCOPED_LOCKABLE MutexLock { ...@@ -568,7 +569,7 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); } ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private: private:
Mutex *const mu_; Mutex* const mu_;
}; };
// ReaderMutexLock // ReaderMutexLock
...@@ -577,11 +578,11 @@ class ABSL_SCOPED_LOCKABLE MutexLock { ...@@ -577,11 +578,11 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// releases a shared lock on a `Mutex` via RAII. // releases a shared lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE ReaderMutexLock { class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public: public:
explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) { explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
mu->ReaderLock(); mu->ReaderLock();
} }
explicit ReaderMutexLock(Mutex *mu, const Condition &cond) explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
ABSL_SHARED_LOCK_FUNCTION(mu) ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
mu->ReaderLockWhen(cond); mu->ReaderLockWhen(cond);
...@@ -595,7 +596,7 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock { ...@@ -595,7 +596,7 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); } ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
private: private:
Mutex *const mu_; Mutex* const mu_;
}; };
// WriterMutexLock // WriterMutexLock
...@@ -604,12 +605,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock { ...@@ -604,12 +605,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
// releases a write (exclusive) lock on a `Mutex` via RAII. // releases a write (exclusive) lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE WriterMutexLock { class ABSL_SCOPED_LOCKABLE WriterMutexLock {
public: public:
explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
mu->WriterLock(); mu->WriterLock();
} }
explicit WriterMutexLock(Mutex *mu, const Condition &cond) explicit WriterMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
mu->WriterLockWhen(cond); mu->WriterLockWhen(cond);
...@@ -623,7 +624,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { ...@@ -623,7 +624,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); } ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
private: private:
Mutex *const mu_; Mutex* const mu_;
}; };
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
...@@ -681,7 +682,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { ...@@ -681,7 +682,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
class Condition { class Condition {
public: public:
// A Condition that returns the result of "(*func)(arg)" // A Condition that returns the result of "(*func)(arg)"
Condition(bool (*func)(void *), void *arg); Condition(bool (*func)(void*), void* arg);
// Templated version for people who are averse to casts. // Templated version for people who are averse to casts.
// //
...@@ -692,8 +693,8 @@ class Condition { ...@@ -692,8 +693,8 @@ class Condition {
// Note: lambdas in this case must contain no bound variables. // Note: lambdas in this case must contain no bound variables.
// //
// See class comment for performance advice. // See class comment for performance advice.
template<typename T> template <typename T>
Condition(bool (*func)(T *), T *arg); Condition(bool (*func)(T*), T* arg);
// Same as above, but allows for cases where `arg` comes from a pointer that // Same as above, but allows for cases where `arg` comes from a pointer that
// is convertible to the function parameter type `T*` but not an exact match. // is convertible to the function parameter type `T*` but not an exact match.
...@@ -707,7 +708,7 @@ class Condition { ...@@ -707,7 +708,7 @@ class Condition {
// a function template is passed as `func`. Also, the dummy `typename = void` // a function template is passed as `func`. Also, the dummy `typename = void`
// template parameter exists just to work around a MSVC mangling bug. // template parameter exists just to work around a MSVC mangling bug.
template <typename T, typename = void> template <typename T, typename = void>
Condition(bool (*func)(T *), typename absl::internal::identity<T>::type *arg); Condition(bool (*func)(T*), typename absl::internal::identity<T>::type* arg);
// Templated version for invoking a method that returns a `bool`. // Templated version for invoking a method that returns a `bool`.
// //
...@@ -717,16 +718,16 @@ class Condition { ...@@ -717,16 +718,16 @@ class Condition {
// Implementation Note: `absl::internal::identity` is used to allow methods to // Implementation Note: `absl::internal::identity` is used to allow methods to
// come from base classes. A simpler signature like // come from base classes. A simpler signature like
// `Condition(T*, bool (T::*)())` does not suffice. // `Condition(T*, bool (T::*)())` does not suffice.
template<typename T> template <typename T>
Condition(T *object, bool (absl::internal::identity<T>::type::* method)()); Condition(T* object, bool (absl::internal::identity<T>::type::*method)());
// Same as above, for const members // Same as above, for const members
template<typename T> template <typename T>
Condition(const T *object, Condition(const T* object,
bool (absl::internal::identity<T>::type::* method)() const); bool (absl::internal::identity<T>::type::*method)() const);
// A Condition that returns the value of `*cond` // A Condition that returns the value of `*cond`
explicit Condition(const bool *cond); explicit Condition(const bool* cond);
// Templated version for invoking a functor that returns a `bool`. // Templated version for invoking a functor that returns a `bool`.
// This approach accepts pointers to non-mutable lambdas, `std::function`, // This approach accepts pointers to non-mutable lambdas, `std::function`,
...@@ -753,9 +754,9 @@ class Condition { ...@@ -753,9 +754,9 @@ class Condition {
// Implementation note: The second template parameter ensures that this // Implementation note: The second template parameter ensures that this
// constructor doesn't participate in overload resolution if T doesn't have // constructor doesn't participate in overload resolution if T doesn't have
// `bool operator() const`. // `bool operator() const`.
template <typename T, typename E = decltype( template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
static_cast<bool (T::*)() const>(&T::operator()))> &T::operator()))>
explicit Condition(const T *obj) explicit Condition(const T* obj)
: Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {} : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
// A Condition that always returns `true`. // A Condition that always returns `true`.
...@@ -771,7 +772,7 @@ class Condition { ...@@ -771,7 +772,7 @@ class Condition {
// Two `Condition` values are guaranteed equal if both their `func` and `arg` // Two `Condition` values are guaranteed equal if both their `func` and `arg`
// components are the same. A null pointer is equivalent to a `true` // components are the same. A null pointer is equivalent to a `true`
// condition. // condition.
static bool GuaranteedEqual(const Condition *a, const Condition *b); static bool GuaranteedEqual(const Condition* a, const Condition* b);
private: private:
// Sizing an allocation for a method pointer can be subtle. In the Itanium // Sizing an allocation for a method pointer can be subtle. In the Itanium
...@@ -799,12 +800,14 @@ class Condition { ...@@ -799,12 +800,14 @@ class Condition {
bool (*eval_)(const Condition*) = nullptr; bool (*eval_)(const Condition*) = nullptr;
// Either an argument for a function call or an object for a method call. // Either an argument for a function call or an object for a method call.
void *arg_ = nullptr; void* arg_ = nullptr;
// Various functions eval_ can point to: // Various functions eval_ can point to:
static bool CallVoidPtrFunction(const Condition*); static bool CallVoidPtrFunction(const Condition*);
template <typename T> static bool CastAndCallFunction(const Condition* c); template <typename T>
template <typename T> static bool CastAndCallMethod(const Condition* c); static bool CastAndCallFunction(const Condition* c);
template <typename T>
static bool CastAndCallMethod(const Condition* c);
// Helper methods for storing, validating, and reading callback arguments. // Helper methods for storing, validating, and reading callback arguments.
template <typename T> template <typename T>
...@@ -816,7 +819,7 @@ class Condition { ...@@ -816,7 +819,7 @@ class Condition {
} }
template <typename T> template <typename T>
inline void ReadCallback(T *callback) const { inline void ReadCallback(T* callback) const {
std::memcpy(callback, callback_, sizeof(*callback)); std::memcpy(callback, callback_, sizeof(*callback));
} }
...@@ -873,7 +876,7 @@ class CondVar { ...@@ -873,7 +876,7 @@ class CondVar {
// spurious wakeup), then reacquires the `Mutex` and returns. // spurious wakeup), then reacquires the `Mutex` and returns.
// //
// Requires and ensures that the current thread holds the `Mutex`. // Requires and ensures that the current thread holds the `Mutex`.
void Wait(Mutex *mu); void Wait(Mutex* mu);
// CondVar::WaitWithTimeout() // CondVar::WaitWithTimeout()
// //
...@@ -888,7 +891,7 @@ class CondVar { ...@@ -888,7 +891,7 @@ class CondVar {
// to return `true` or `false`. // to return `true` or `false`.
// //
// Requires and ensures that the current thread holds the `Mutex`. // Requires and ensures that the current thread holds the `Mutex`.
bool WaitWithTimeout(Mutex *mu, absl::Duration timeout); bool WaitWithTimeout(Mutex* mu, absl::Duration timeout);
// CondVar::WaitWithDeadline() // CondVar::WaitWithDeadline()
// //
...@@ -905,7 +908,7 @@ class CondVar { ...@@ -905,7 +908,7 @@ class CondVar {
// to return `true` or `false`. // to return `true` or `false`.
// //
// Requires and ensures that the current thread holds the `Mutex`. // Requires and ensures that the current thread holds the `Mutex`.
bool WaitWithDeadline(Mutex *mu, absl::Time deadline); bool WaitWithDeadline(Mutex* mu, absl::Time deadline);
// CondVar::Signal() // CondVar::Signal()
// //
...@@ -922,18 +925,17 @@ class CondVar { ...@@ -922,18 +925,17 @@ class CondVar {
// Causes all subsequent uses of this `CondVar` to be logged via // Causes all subsequent uses of this `CondVar` to be logged via
// `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`. // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
// Note: this method substantially reduces `CondVar` performance. // Note: this method substantially reduces `CondVar` performance.
void EnableDebugLog(const char *name); void EnableDebugLog(const char* name);
private: private:
bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t); bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
void Remove(base_internal::PerThreadSynch *s); void Remove(base_internal::PerThreadSynch* s);
void Wakeup(base_internal::PerThreadSynch *w); void Wakeup(base_internal::PerThreadSynch* w);
std::atomic<intptr_t> cv_; // Condition variable state. std::atomic<intptr_t> cv_; // Condition variable state.
CondVar(const CondVar&) = delete; CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete; CondVar& operator=(const CondVar&) = delete;
}; };
// Variants of MutexLock. // Variants of MutexLock.
// //
// If you find yourself using one of these, consider instead using // If you find yourself using one of these, consider instead using
...@@ -944,14 +946,14 @@ class CondVar { ...@@ -944,14 +946,14 @@ class CondVar {
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null. // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class ABSL_SCOPED_LOCKABLE MutexLockMaybe { class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public: public:
explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
if (this->mu_ != nullptr) { if (this->mu_ != nullptr) {
this->mu_->Lock(); this->mu_->Lock();
} }
} }
explicit MutexLockMaybe(Mutex *mu, const Condition &cond) explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
if (this->mu_ != nullptr) { if (this->mu_ != nullptr) {
...@@ -960,11 +962,13 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { ...@@ -960,11 +962,13 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
} }
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() { ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); } if (this->mu_ != nullptr) {
this->mu_->Unlock();
}
} }
private: private:
Mutex *const mu_; Mutex* const mu_;
MutexLockMaybe(const MutexLockMaybe&) = delete; MutexLockMaybe(const MutexLockMaybe&) = delete;
MutexLockMaybe(MutexLockMaybe&&) = delete; MutexLockMaybe(MutexLockMaybe&&) = delete;
MutexLockMaybe& operator=(const MutexLockMaybe&) = delete; MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
...@@ -977,25 +981,27 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { ...@@ -977,25 +981,27 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
// mutex before destruction. `Release()` may be called at most once. // mutex before destruction. `Release()` may be called at most once.
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public: public:
explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
this->mu_->Lock(); this->mu_->Lock();
} }
explicit ReleasableMutexLock(Mutex *mu, const Condition &cond) explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { : mu_(mu) {
this->mu_->LockWhen(cond); this->mu_->LockWhen(cond);
} }
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() { ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); } if (this->mu_ != nullptr) {
this->mu_->Unlock();
}
} }
void Release() ABSL_UNLOCK_FUNCTION(); void Release() ABSL_UNLOCK_FUNCTION();
private: private:
Mutex *mu_; Mutex* mu_;
ReleasableMutexLock(const ReleasableMutexLock&) = delete; ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock(ReleasableMutexLock&&) = delete; ReleasableMutexLock(ReleasableMutexLock&&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete; ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
...@@ -1012,8 +1018,8 @@ inline CondVar::CondVar() : cv_(0) {} ...@@ -1012,8 +1018,8 @@ inline CondVar::CondVar() : cv_(0) {}
// static // static
template <typename T> template <typename T>
bool Condition::CastAndCallMethod(const Condition *c) { bool Condition::CastAndCallMethod(const Condition* c) {
T *object = static_cast<T *>(c->arg_); T* object = static_cast<T*>(c->arg_);
bool (T::*method_pointer)(); bool (T::*method_pointer)();
c->ReadCallback(&method_pointer); c->ReadCallback(&method_pointer);
return (object->*method_pointer)(); return (object->*method_pointer)();
...@@ -1021,44 +1027,43 @@ bool Condition::CastAndCallMethod(const Condition *c) { ...@@ -1021,44 +1027,43 @@ bool Condition::CastAndCallMethod(const Condition *c) {
// static // static
template <typename T> template <typename T>
bool Condition::CastAndCallFunction(const Condition *c) { bool Condition::CastAndCallFunction(const Condition* c) {
bool (*function)(T *); bool (*function)(T*);
c->ReadCallback(&function); c->ReadCallback(&function);
T *argument = static_cast<T *>(c->arg_); T* argument = static_cast<T*>(c->arg_);
return (*function)(argument); return (*function)(argument);
} }
template <typename T> template <typename T>
inline Condition::Condition(bool (*func)(T *), T *arg) inline Condition::Condition(bool (*func)(T*), T* arg)
: eval_(&CastAndCallFunction<T>), : eval_(&CastAndCallFunction<T>),
arg_(const_cast<void *>(static_cast<const void *>(arg))) { arg_(const_cast<void*>(static_cast<const void*>(arg))) {
static_assert(sizeof(&func) <= sizeof(callback_), static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer was passed to Condition."); "An overlarge function pointer was passed to Condition.");
StoreCallback(func); StoreCallback(func);
} }
template <typename T, typename> template <typename T, typename>
inline Condition::Condition(bool (*func)(T *), inline Condition::Condition(bool (*func)(T*),
typename absl::internal::identity<T>::type *arg) typename absl::internal::identity<T>::type* arg)
// Just delegate to the overload above. // Just delegate to the overload above.
: Condition(func, arg) {} : Condition(func, arg) {}
template <typename T> template <typename T>
inline Condition::Condition(T *object, inline Condition::Condition(T* object,
bool (absl::internal::identity<T>::type::*method)()) bool (absl::internal::identity<T>::type::*method)())
: eval_(&CastAndCallMethod<T>), : eval_(&CastAndCallMethod<T>), arg_(object) {
arg_(object) {
static_assert(sizeof(&method) <= sizeof(callback_), static_assert(sizeof(&method) <= sizeof(callback_),
"An overlarge method pointer was passed to Condition."); "An overlarge method pointer was passed to Condition.");
StoreCallback(method); StoreCallback(method);
} }
template <typename T> template <typename T>
inline Condition::Condition(const T *object, inline Condition::Condition(const T* object,
bool (absl::internal::identity<T>::type::*method)() bool (absl::internal::identity<T>::type::*method)()
const) const)
: eval_(&CastAndCallMethod<T>), : eval_(&CastAndCallMethod<T>),
arg_(reinterpret_cast<void *>(const_cast<T *>(object))) { arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
StoreCallback(method); StoreCallback(method);
} }
...@@ -1088,7 +1093,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)); ...@@ -1088,7 +1093,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
// //
// This has the same ordering and single-use limitations as // This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above. // RegisterMutexProfiler() above.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj, void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)); int64_t wait_cycles));
// Register a hook for CondVar tracing. // Register a hook for CondVar tracing.
...@@ -1103,7 +1108,7 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj, ...@@ -1103,7 +1108,7 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
// //
// This has the same ordering and single-use limitations as // This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above. // RegisterMutexProfiler() above.
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)); void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
// EnableMutexInvariantDebugging() // EnableMutexInvariantDebugging()
// //
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment