Commit 189d55a5 authored by Abseil Team, committed by Andy Getz

Export of internal Abseil changes

--
84bcdcd9497d1ec989f50c8dee93f656507c7bd6 by Abseil Team <absl-team@google.com>:

Reduce length of the `flat_hash_map<std::string, V>` type name in order to reduce binary bloat.

PiperOrigin-RevId: 391560997

--
5f49bd435e066989851dc045c7786ef400413f66 by Greg Falcon <gfalcon@google.com>:

Claim a bit from the Cord refcount for future use.

Also rename the increasingly-inaccurately named "Refcount" class to "RefcountAndFlags".

In optimized builds, this adds an extra mask instruction to decrement and test operations, but no new branches.  Future flags can be added at no extra cost.  Each additional flag will of course reduce the range of our refcount, but even with the bit added, we still support refcounts of 500 million.

PiperOrigin-RevId: 391557567
GitOrigin-RevId: 84bcdcd9497d1ec989f50c8dee93f656507c7bd6
Change-Id: I051823bf5a9a42d4fa9200e39563ab585ecab331
parent c1aa431c
...@@ -78,10 +78,7 @@ struct StringHash { ...@@ -78,10 +78,7 @@ struct StringHash {
} }
}; };
// Supports heterogeneous lookup for string-like elements. struct StringEq {
struct StringHashEq {
using Hash = StringHash;
struct Eq {
using is_transparent = void; using is_transparent = void;
bool operator()(absl::string_view lhs, absl::string_view rhs) const { bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs; return lhs == rhs;
...@@ -95,7 +92,12 @@ struct StringHashEq { ...@@ -95,7 +92,12 @@ struct StringHashEq {
bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
return lhs == rhs; return lhs == rhs;
} }
}; };
// Supports heterogeneous lookup for string-like elements.
// Hash and Eq are separate named types (rather than nested classes) to keep
// the mangled name of flat_hash_map<std::string, V> short and reduce binary
// bloat.
struct StringHashEq {
  using Hash = StringHash;
  using Eq = StringEq;
};
template <> template <>
......
...@@ -80,12 +80,13 @@ enum Constants { ...@@ -80,12 +80,13 @@ enum Constants {
kMaxBytesToCopy = 511 kMaxBytesToCopy = 511
}; };
// Wraps std::atomic for reference counting. // Compact class for tracking the reference count and state flags for CordRep
class Refcount { // instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
public: public:
constexpr Refcount() : count_{kRefIncrement} {} constexpr RefcountAndFlags() : count_{kRefIncrement} {}
struct Immortal {}; struct Immortal {};
explicit constexpr Refcount(Immortal) : count_(kImmortalTag) {} explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
// Increments the reference count. Imposes no memory ordering. // Increments the reference count. Imposes no memory ordering.
inline void Increment() { inline void Increment() {
...@@ -98,26 +99,27 @@ class Refcount { ...@@ -98,26 +99,27 @@ class Refcount {
// Returns false if there are no references outstanding; true otherwise. // Returns false if there are no references outstanding; true otherwise.
// Inserts barriers to ensure that state written before this method returns // Inserts barriers to ensure that state written before this method returns
// false will be visible to a thread that just observed this method returning // false will be visible to a thread that just observed this method returning
// false. // false. Always returns false when the immortal bit is set.
inline bool Decrement() { inline bool Decrement() {
int32_t refcount = count_.load(std::memory_order_acquire); int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
assert(refcount > 0 || refcount & kImmortalTag); assert(refcount > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement && return refcount != kRefIncrement &&
count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) != (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
kRefIncrement; kRefcountMask) != kRefIncrement;
} }
// Same as Decrement but expect that refcount is greater than 1. // Same as Decrement but expect that refcount is greater than 1.
inline bool DecrementExpectHighRefcount() { inline bool DecrementExpectHighRefcount() {
int32_t refcount = int32_t refcount =
count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel); count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
assert(refcount > 0 || refcount & kImmortalTag); kRefcountMask;
assert(refcount > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement; return refcount != kRefIncrement;
} }
// Returns the current reference count using acquire semantics. // Returns the current reference count using acquire semantics.
inline int32_t Get() const { inline int32_t Get() const {
return count_.load(std::memory_order_acquire) >> kImmortalShift; return count_.load(std::memory_order_acquire) >> kNumFlags;
} }
// Returns whether the atomic integer is 1. // Returns whether the atomic integer is 1.
...@@ -127,26 +129,34 @@ class Refcount { ...@@ -127,26 +129,34 @@ class Refcount {
// This call performs the test for a reference count of one, and // This call performs the test for a reference count of one, and
// performs the memory barrier needed for the owning thread // performs the memory barrier needed for the owning thread
// to act on the object, knowing that it has exclusive access to the // to act on the object, knowing that it has exclusive access to the
// object. // object. Always returns false when the immortal bit is set.
inline bool IsOne() { inline bool IsOne() {
return count_.load(std::memory_order_acquire) == kRefIncrement; return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
kRefIncrement;
} }
bool IsImmortal() const { bool IsImmortal() const {
return (count_.load(std::memory_order_relaxed) & kImmortalTag) != 0; return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
} }
private: private:
// We reserve the bottom bit to tag a reference count as immortal. // We reserve the bottom bits for flags.
// By making it `1` we ensure that we never reach `0` when adding/subtracting // kImmortalBit indicates that this entity should never be collected; it is
// `2`, thus it never looks as if it should be destroyed. // used for the StringConstant constructor to avoid collecting immutable
// These are used for the StringConstant constructor where we do not increase // constant cords.
// the refcount at construction time (due to constinit requirements) but we // kReservedFlag is reserved for future use.
// will still decrease it at destruction time to avoid branching on Unref.
enum { enum {
kImmortalShift = 1, kNumFlags = 2,
kRefIncrement = 1 << kImmortalShift,
kImmortalTag = kRefIncrement - 1 kImmortalFlag = 0x1,
kReservedFlag = 0x2,
kRefIncrement = (1 << kNumFlags),
// Bitmask to use when checking refcount by equality. This masks out
// all flags except kImmortalFlag, which is part of the refcount for
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
kRefcountMask = ~kReservedFlag,
}; };
std::atomic<int32_t> count_; std::atomic<int32_t> count_;
...@@ -195,13 +205,13 @@ static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive"); ...@@ -195,13 +205,13 @@ static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
struct CordRep { struct CordRep {
CordRep() = default; CordRep() = default;
constexpr CordRep(Refcount::Immortal immortal, size_t l) constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
: length(l), refcount(immortal), tag(EXTERNAL), storage{} {} : length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
// The following three fields have to be less than 32 bytes since // The following three fields have to be less than 32 bytes since
// that is the smallest supported flat node size. // that is the smallest supported flat node size.
size_t length; size_t length;
Refcount refcount; RefcountAndFlags refcount;
// If tag < FLAT, it represents CordRepKind and indicates the type of node. // If tag < FLAT, it represents CordRepKind and indicates the type of node.
// Otherwise, the node type is CordRepFlat and the tag is the encoded size. // Otherwise, the node type is CordRepFlat and the tag is the encoded size.
uint8_t tag; uint8_t tag;
...@@ -275,7 +285,7 @@ using ExternalReleaserInvoker = void (*)(CordRepExternal*); ...@@ -275,7 +285,7 @@ using ExternalReleaserInvoker = void (*)(CordRepExternal*);
struct CordRepExternal : public CordRep { struct CordRepExternal : public CordRep {
CordRepExternal() = default; CordRepExternal() = default;
explicit constexpr CordRepExternal(absl::string_view str) explicit constexpr CordRepExternal(absl::string_view str)
: CordRep(Refcount::Immortal{}, str.size()), : CordRep(RefcountAndFlags::Immortal{}, str.size()),
base(str.data()), base(str.data()),
releaser_invoker(nullptr) {} releaser_invoker(nullptr) {}
......
...@@ -623,7 +623,7 @@ inline void CordRepBtree::Destroy(CordRepBtree* tree) { ...@@ -623,7 +623,7 @@ inline void CordRepBtree::Destroy(CordRepBtree* tree) {
inline CordRepBtree* CordRepBtree::CopyRaw() const {
  // Allocate raw storage and bitwise-copy this node, then re-initialize the
  // refcount in place (placement new) so the copy starts with a fresh
  // reference count of one rather than inheriting this node's count.
  auto* tree = static_cast<CordRepBtree*>(::operator new(sizeof(CordRepBtree)));
  memcpy(static_cast<void*>(tree), this, sizeof(CordRepBtree));
  new (&tree->refcount) RefcountAndFlags;
  return tree;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment