Commit 28549d18 by Dmitry Vyukov Committed by Copybara-Service

absl: speed up Mutex::ReaderLock/Unlock

Currently ReaderLock/Unlock tries the CAS only once.
Even if there is moderate contention from other readers only,
ReaderLock/Unlock fall onto the slow path, which does lots of additional
work before retrying the CAS (since there are only readers, the slow-path
logic is not really needed for anything).
Instead, retry the CAS as long as the lock is held only by readers.

name                                old cpu/op   new cpu/op   delta
BM_ReaderLock/real_time/threads:1   17.9ns ± 0%  17.9ns ± 0%     ~     (p=0.071 n=5+5)
BM_ReaderLock/real_time/threads:72  11.4µs ± 3%   8.4µs ± 4%  -26.24%  (p=0.008 n=5+5)

PiperOrigin-RevId: 566981511
Change-Id: I432a3c1d85b84943d0ad4776a34fa5bfcf5b3b8e
parent 556fcb57
...@@ -1523,12 +1523,19 @@ void Mutex::ReaderLock() { ...@@ -1523,12 +1523,19 @@ void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this); GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then slow loop for (;;) {
if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 || // If there are non-readers holding the lock, use the slow loop.
!mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne, if (ABSL_PREDICT_FALSE(v & (kMuWriter | kMuWait | kMuEvent)) != 0) {
std::memory_order_acquire,
std::memory_order_relaxed)) {
this->LockSlow(kShared, nullptr, 0); this->LockSlow(kShared, nullptr, 0);
break;
}
// We can avoid the loop and only use the CAS when the lock is free or
// only held by readers.
if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
std::memory_order_relaxed))) {
break;
}
} }
DebugOnlyLockEnter(this, id); DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
...@@ -1702,16 +1709,20 @@ void Mutex::ReaderUnlock() { ...@@ -1702,16 +1709,20 @@ void Mutex::ReaderUnlock() {
DebugOnlyLockLeave(this); DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed); intptr_t v = mu_.load(std::memory_order_relaxed);
assert((v & (kMuWriter | kMuReader)) == kMuReader); assert((v & (kMuWriter | kMuReader)) == kMuReader);
if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) { for (;;) {
if (ABSL_PREDICT_FALSE((v & (kMuReader | kMuWait | kMuEvent)) !=
kMuReader)) {
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
break;
}
// fast reader release (reader with no waiters) // fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne; intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release, if (ABSL_PREDICT_TRUE(
std::memory_order_relaxed)) { mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock); std::memory_order_relaxed))) {
return; break;
} }
} }
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock); ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment