Commit 421a74dc by Vertexwahn

Fix some spelling mistakes

parent 4ffaea74
...@@ -170,7 +170,7 @@ And finally install: ...@@ -170,7 +170,7 @@ And finally install:
cmake --build /temporary/build/abseil-cpp --target install cmake --build /temporary/build/abseil-cpp --target install
``` ```
# CMake Option Synposis # CMake Option Synopsis
## Enable Standard CMake Installation ## Enable Standard CMake Installation
......
# C++ Upgrade Tools # C++ Upgrade Tools
Abseil may occassionally release API-breaking changes. As noted in our Abseil may occasionally release API-breaking changes. As noted in our
[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool [Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
to do the work of effecting such API-breaking changes, when absolutely to do the work of effecting such API-breaking changes, when absolutely
necessary. necessary.
......
...@@ -331,7 +331,7 @@ ...@@ -331,7 +331,7 @@
// This functionality is supported by GNU linker. // This functionality is supported by GNU linker.
#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
#ifdef _AIX #ifdef _AIX
// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo // __attribute__((section(#name))) on AIX is achieved by using the `.csect` pseudo
// op which includes an additional integer as part of its syntax indicating // op which includes an additional integer as part of its syntax indicating
// alignment. If data fall under different alignments then you might get a // alignment. If data fall under different alignments then you might get a
// compilation error indicating a `Section type conflict`. // compilation error indicating a `Section type conflict`.
......
...@@ -946,7 +946,7 @@ class ExceptionSafetyTest { ...@@ -946,7 +946,7 @@ class ExceptionSafetyTest {
* `std::unique_ptr<T> operator()() const` where T is the type being tested. * `std::unique_ptr<T> operator()() const` where T is the type being tested.
* It is used for reliably creating identical T instances to test on. * It is used for reliably creating identical T instances to test on.
* *
* - Operation: The operation object (passsed in via tester.WithOperation(...) * - Operation: The operation object (passed in via tester.WithOperation(...)
* or tester.Test(...)) must be invocable with the signature * or tester.Test(...)) must be invocable with the signature
* `void operator()(T*) const` where T is the type being tested. It is used * `void operator()(T*) const` where T is the type being tested. It is used
* for performing steps on a T instance that may throw and that need to be * for performing steps on a T instance that may throw and that need to be
......
...@@ -95,7 +95,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) { ...@@ -95,7 +95,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
} }
TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
// This test repeatly creates and joins a series of threads, each of // This test repeatedly creates and joins a series of threads, each of
// which acquires and releases shared Mutex locks. This verifies // which acquires and releases shared Mutex locks. This verifies
// Mutex operations work correctly under a reused // Mutex operations work correctly under a reused
// ThreadIdentity. Note that the most likely failure mode of this // ThreadIdentity. Note that the most likely failure mode of this
......
...@@ -745,7 +745,7 @@ class InlinedVector { ...@@ -745,7 +745,7 @@ class InlinedVector {
// Erases the element at `pos`, returning an `iterator` pointing to where the // Erases the element at `pos`, returning an `iterator` pointing to where the
// erased element was located. // erased element was located.
// //
// NOTE: may return `end()`, which is not dereferencable. // NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator pos) { iterator erase(const_iterator pos) {
ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos < end()); ABSL_HARDENING_ASSERT(pos < end());
...@@ -757,7 +757,7 @@ class InlinedVector { ...@@ -757,7 +757,7 @@ class InlinedVector {
// range [`from`, `to`), returning an `iterator` pointing to where the first // range [`from`, `to`), returning an `iterator` pointing to where the first
// erased element was located. // erased element was located.
// //
// NOTE: may return `end()`, which is not dereferencable. // NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator from, const_iterator to) { iterator erase(const_iterator from, const_iterator to) {
ABSL_HARDENING_ASSERT(from >= begin()); ABSL_HARDENING_ASSERT(from >= begin());
ABSL_HARDENING_ASSERT(from <= to); ABSL_HARDENING_ASSERT(from <= to);
......
...@@ -66,7 +66,7 @@ void BM_StdVectorFill(benchmark::State& state) { ...@@ -66,7 +66,7 @@ void BM_StdVectorFill(benchmark::State& state) {
BENCHMARK(BM_StdVectorFill)->Range(1, 256); BENCHMARK(BM_StdVectorFill)->Range(1, 256);
// The purpose of the next two benchmarks is to verify that // The purpose of the next two benchmarks is to verify that
// absl::InlinedVector is efficient when moving is more efficent than // absl::InlinedVector is efficient when moving is more efficient than
// copying. To do so, we use strings that are larger than the short // copying. To do so, we use strings that are larger than the short
// string optimization. // string optimization.
bool StringRepresentedInline(std::string s) { bool StringRepresentedInline(std::string s) {
......
...@@ -87,7 +87,7 @@ struct common_policy_traits { ...@@ -87,7 +87,7 @@ struct common_policy_traits {
} }
private: private:
// To rank the overloads below for overload resoltion. Rank0 is preferred. // To rank the overloads below for overload resolution. Rank0 is preferred.
struct Rank2 {}; struct Rank2 {};
struct Rank1 : Rank2 {}; struct Rank1 : Rank2 {};
struct Rank0 : Rank1 {}; struct Rank0 : Rank1 {};
......
...@@ -165,7 +165,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed( ...@@ -165,7 +165,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
std::forward<F>(f)); std::forward<F>(f));
} }
// Given arguments of an std::pair's consructor, PairArgs() returns a pair of // Given arguments of an std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain // tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair. // constructor arguments for the first and the second elements of the pair.
// //
......
...@@ -1050,7 +1050,7 @@ template <typename NotMemcpyPolicy> ...@@ -1050,7 +1050,7 @@ template <typename NotMemcpyPolicy>
void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy, void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
Storage* other) { Storage* other) {
// Note: `destroy` needs to use pre-swap allocator while `construct` - // Note: `destroy` needs to use pre-swap allocator while `construct` -
// post-swap allocator. Allocators will be swaped later on outside of // post-swap allocator. Allocators will be swapped later on outside of
// `SwapInlinedElements`. // `SwapInlinedElements`.
Storage* small_ptr = this; Storage* small_ptr = this;
Storage* large_ptr = other; Storage* large_ptr = other;
......
...@@ -85,7 +85,7 @@ void BM_OffsetVariable(benchmark::State& state) { ...@@ -85,7 +85,7 @@ void BM_OffsetVariable(benchmark::State& state) {
size_t m = 5; size_t m = 5;
size_t k = 7; size_t k = 7;
ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset, ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
"Inavlid offset"); "Invalid offset");
for (auto _ : state) { for (auto _ : state) {
DoNotOptimize(n); DoNotOptimize(n);
DoNotOptimize(m); DoNotOptimize(m);
......
...@@ -115,7 +115,7 @@ ...@@ -115,7 +115,7 @@
// starting with that index and extract potential candidates: occupied slots // starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with // group, we stop and return an error. Each candidate slot `y` is compared with
// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the // `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never // next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for. // match the value we're looking for.
// //
...@@ -2456,7 +2456,7 @@ class raw_hash_set { ...@@ -2456,7 +2456,7 @@ class raw_hash_set {
void rehash_and_grow_if_necessary() { void rehash_and_grow_if_necessary() {
const size_t cap = capacity(); const size_t cap = capacity();
if (cap > Group::kWidth && if (cap > Group::kWidth &&
// Do these calcuations in 64-bit to avoid overflow. // Do these calculations in 64-bit to avoid overflow.
size() * uint64_t{32} <= cap* uint64_t{25}) { size() * uint64_t{32} <= cap* uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity. // Squash DELETED without growing if there is enough capacity.
// //
......
...@@ -334,7 +334,7 @@ class node_hash_set ...@@ -334,7 +334,7 @@ class node_hash_set
// for the past-the-end iterator, which is invalidated. // for the past-the-end iterator, which is invalidated.
// //
// `swap()` requires that the node hash set's hashing and key equivalence // `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable, and are exchaged using unqualified calls to // functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has // non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value` // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call // set to `true`, the allocators are also exchanged using an unqualified call
......
...@@ -438,7 +438,7 @@ CRC* CRC::Crc32c() { ...@@ -438,7 +438,7 @@ CRC* CRC::Crc32c() {
// This Concat implementation works for arbitrary polynomials. // This Concat implementation works for arbitrary polynomials.
void CRC::Concat(uint32_t* px, uint32_t y, size_t ylen) { void CRC::Concat(uint32_t* px, uint32_t y, size_t ylen) {
// https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks // https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks
// The CRC of a message M is the remainder of polynomial divison modulo G, // The CRC of a message M is the remainder of polynomial division modulo G,
// where the coefficient arithmetic is performed modulo 2 (so +/- are XOR): // where the coefficient arithmetic is performed modulo 2 (so +/- are XOR):
// R(x) = M(x) x**n (mod G) // R(x) = M(x) x**n (mod G)
// (n is the degree of G) // (n is the degree of G)
......
...@@ -53,7 +53,7 @@ class CRC { ...@@ -53,7 +53,7 @@ class CRC {
// points to an array of "length" zero bytes. // points to an array of "length" zero bytes.
virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0; virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0;
// Inverse opration of ExtendByZeroes. If `crc` is the CRC value of a string // Inverse operation of ExtendByZeroes. If `crc` is the CRC value of a string
// ending in `length` zero bytes, this returns a CRC value of that string // ending in `length` zero bytes, this returns a CRC value of that string
// with those zero bytes removed. // with those zero bytes removed.
virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0; virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0;
......
...@@ -71,7 +71,7 @@ class CrcCordState { ...@@ -71,7 +71,7 @@ class CrcCordState {
struct Rep { struct Rep {
// `removed_prefix` is the crc and length of any prefix that has been // `removed_prefix` is the crc and length of any prefix that has been
// removed from the Cord (for example, by calling // removed from the Cord (for example, by calling
// `CrcCord::RemovePrefix()`). To get the checkum of any prefix of the cord, // `CrcCord::RemovePrefix()`). To get the checksum of any prefix of the cord,
// this value must be subtracted from `prefix_crc`. See `Checksum()` for an // this value must be subtracted from `prefix_crc`. See `Checksum()` for an
// example. // example.
// //
......
...@@ -111,7 +111,7 @@ class CRC32 : public CRCImpl { ...@@ -111,7 +111,7 @@ class CRC32 : public CRCImpl {
// Common implementation guts for ExtendByZeroes and UnextendByZeroes(). // Common implementation guts for ExtendByZeroes and UnextendByZeroes().
// //
// zeroes_table is a table as returned by FillZeroesTable(), containing // zeroes_table is a table as returned by FillZeroesTable(), containing
// polynomials representing CRCs of strings-of-zeros of various lenghts, // polynomials representing CRCs of strings-of-zeros of various lengths,
// and which can be combined by polynomial multiplication. poly_table is // and which can be combined by polynomial multiplication. poly_table is
// a table of CRC byte extension values. These tables are determined by // a table of CRC byte extension values. These tables are determined by
// the generator polynomial. // the generator polynomial.
......
...@@ -40,7 +40,7 @@ using absl::debugging_internal::AddressIsReadable; ...@@ -40,7 +40,7 @@ using absl::debugging_internal::AddressIsReadable;
#if defined(__linux__) && defined(__i386__) #if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(), // Count "push %reg" instructions in VDSO __kernel_vsyscall(),
// preceeding "syscall" or "sysenter". // preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0. // If __kernel_vsyscall uses frame pointer, answer 0.
// //
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall // kMaxBytes tells how many instruction bytes of __kernel_vsyscall
......
...@@ -121,7 +121,7 @@ inline void* Clone(FlagOpFn op, const void* obj) { ...@@ -121,7 +121,7 @@ inline void* Clone(FlagOpFn op, const void* obj) {
flags_internal::CopyConstruct(op, obj, res); flags_internal::CopyConstruct(op, obj, res);
return res; return res;
} }
// Returns true if parsing of input text is successfull. // Returns true if parsing of input text is successful.
inline bool Parse(FlagOpFn op, absl::string_view text, void* dst, inline bool Parse(FlagOpFn op, absl::string_view text, void* dst,
std::string* error) { std::string* error) {
return op(FlagOp::kParse, &text, dst, error) != nullptr; return op(FlagOp::kParse, &text, dst, error) != nullptr;
...@@ -139,12 +139,12 @@ inline size_t Sizeof(FlagOpFn op) { ...@@ -139,12 +139,12 @@ inline size_t Sizeof(FlagOpFn op) {
return static_cast<size_t>(reinterpret_cast<intptr_t>( return static_cast<size_t>(reinterpret_cast<intptr_t>(
op(FlagOp::kSizeof, nullptr, nullptr, nullptr))); op(FlagOp::kSizeof, nullptr, nullptr, nullptr)));
} }
// Returns fast type id coresponding to the value type. // Returns fast type id corresponding to the value type.
inline FlagFastTypeId FastTypeId(FlagOpFn op) { inline FlagFastTypeId FastTypeId(FlagOpFn op) {
return reinterpret_cast<FlagFastTypeId>( return reinterpret_cast<FlagFastTypeId>(
op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr)); op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr));
} }
// Returns fast type id coresponding to the value type. // Returns fast type id corresponding to the value type.
inline const std::type_info* RuntimeTypeId(FlagOpFn op) { inline const std::type_info* RuntimeTypeId(FlagOpFn op) {
return reinterpret_cast<const std::type_info*>( return reinterpret_cast<const std::type_info*>(
op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr)); op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr));
......
...@@ -66,11 +66,11 @@ class FunctionRef; ...@@ -66,11 +66,11 @@ class FunctionRef;
// FunctionRef // FunctionRef
// //
// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with // An `absl::FunctionRef` is a lightweight wrapper to any invocable object with
// a compatible signature. Generally, an `absl::FunctionRef` should only be used // a compatible signature. Generally, an `absl::FunctionRef` should only be used
// as an argument type and should be preferred as an argument over a const // as an argument type and should be preferred as an argument over a const
// reference to a `std::function`. `absl::FunctionRef` itself does not allocate, // reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
// although the wrapped invokable may. // although the wrapped invocable may.
// //
// Example: // Example:
// //
...@@ -98,7 +98,7 @@ class FunctionRef<R(Args...)> { ...@@ -98,7 +98,7 @@ class FunctionRef<R(Args...)> {
std::is_convertible<FR, R>::value>::type; std::is_convertible<FR, R>::value>::type;
public: public:
// Constructs a FunctionRef from any invokable type. // Constructs a FunctionRef from any invocable type.
template <typename F, typename = EnableIfCompatible<const F&>> template <typename F, typename = EnableIfCompatible<const F&>>
// NOLINTNEXTLINE(runtime/explicit) // NOLINTNEXTLINE(runtime/explicit)
FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND)
......
...@@ -488,7 +488,7 @@ class CoreImpl { ...@@ -488,7 +488,7 @@ class CoreImpl {
// object. // object.
Clear(); Clear();
// Perform the actual move/destory operation on the target function. // Perform the actual move/destroy operation on the target function.
other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
manager_ = other.manager_; manager_ = other.manager_;
invoker_ = other.invoker_; invoker_ = other.invoker_;
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
// //
// `absl::Hash` may also produce different values from different dynamically // `absl::Hash` may also produce different values from different dynamically
// loaded libraries. For this reason, `absl::Hash` values must never cross // loaded libraries. For this reason, `absl::Hash` values must never cross
// boundries in dynamically loaded libraries (including when used in types like // boundaries in dynamically loaded libraries (including when used in types like
// hash containers.) // hash containers.)
// //
// `absl::Hash` is intended to strongly mix input bits with a target of passing // `absl::Hash` is intended to strongly mix input bits with a target of passing
......
...@@ -49,7 +49,7 @@ namespace { ...@@ -49,7 +49,7 @@ namespace {
// This templated function avoids compiler warnings about tautological // This templated function avoids compiler warnings about tautological
// comparisons when log_internal::Tid is unsigned. It can be replaced with a // comparisons when log_internal::Tid is unsigned. It can be replaced with a
// constexpr if once the minimum C++ version Abseil suppports is C++17. // constexpr if once the minimum C++ version Abseil supports is C++17.
template <typename T> template <typename T>
inline std::enable_if_t<!std::is_signed<T>::value> inline std::enable_if_t<!std::is_signed<T>::value>
PutLeadingWhitespace(T tid, char*& p) { PutLeadingWhitespace(T tid, char*& p) {
......
...@@ -68,7 +68,7 @@ bool DiedOfQFatal(int exit_status) { ...@@ -68,7 +68,7 @@ bool DiedOfQFatal(int exit_status) {
#endif #endif
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Helper for Log inititalization in test // Helper for Log initialization in test
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
void LogTestEnvironment::SetUp() { void LogTestEnvironment::SetUp() {
......
...@@ -54,7 +54,7 @@ bool DiedOfQFatal(int exit_status); ...@@ -54,7 +54,7 @@ bool DiedOfQFatal(int exit_status);
#endif #endif
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Helper for Log inititalization in test // Helper for Log initialization in test
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
class LogTestEnvironment : public ::testing::Environment { class LogTestEnvironment : public ::testing::Environment {
......
...@@ -49,7 +49,7 @@ void TestUniform(URBG* gen) { ...@@ -49,7 +49,7 @@ void TestUniform(URBG* gen) {
// (a, b) semantics, inferred types. // (a, b) semantics, inferred types.
absl::Uniform(absl::IntervalOpenOpen, *gen, 0, 1.0); // Promoted to double absl::Uniform(absl::IntervalOpenOpen, *gen, 0, 1.0); // Promoted to double
// Explict overriding of types. // Explicit overriding of types.
absl::Uniform<int>(*gen, 0, 100); absl::Uniform<int>(*gen, 0, 100);
absl::Uniform<int8_t>(*gen, 0, 100); absl::Uniform<int8_t>(*gen, 0, 100);
absl::Uniform<int16_t>(*gen, 0, 100); absl::Uniform<int16_t>(*gen, 0, 100);
......
...@@ -213,7 +213,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q, ...@@ -213,7 +213,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
double result = 1.; double result = 1.;
int ns = static_cast<int>(q + xc * psq); int ns = static_cast<int>(q + xc * psq);
// Use the soper reduction forumla. // Use the soper reduction formula.
double rx = (ns == 0) ? x : x / xc; double rx = (ns == 0) ? x : x / xc;
double temp = q - ai; double temp = q - ai;
for (;;) { for (;;) {
...@@ -247,7 +247,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q, ...@@ -247,7 +247,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
// https://www.jstor.org/stable/2346798?read-now=1&seq=4#page_scan_tab_contents // https://www.jstor.org/stable/2346798?read-now=1&seq=4#page_scan_tab_contents
// https://www.jstor.org/stable/2346887?seq=1#page_scan_tab_contents // https://www.jstor.org/stable/2346887?seq=1#page_scan_tab_contents
// //
// XINBTA(p, q, beta, alhpa) // XINBTA(p, q, beta, alpha)
// p: the value of the parameter p. // p: the value of the parameter p.
// q: the value of the parameter q. // q: the value of the parameter q.
// beta: the value of ln B(p, q) // beta: the value of ln B(p, q)
......
...@@ -142,7 +142,7 @@ class alignas(8) randen_engine { ...@@ -142,7 +142,7 @@ class alignas(8) randen_engine {
// The Randen paper suggests preferentially initializing even-numbered // The Randen paper suggests preferentially initializing even-numbered
// 128-bit vectors of the randen state (there are 16 such vectors). // 128-bit vectors of the randen state (there are 16 such vectors).
// The seed data is merged into the state offset by 128-bits, which // The seed data is merged into the state offset by 128-bits, which
// implies prefering seed bytes [16..31, ..., 208..223]. Since the // implies preferring seed bytes [16..31, ..., 208..223]. Since the
// buffer is 32-bit values, we swap the corresponding buffer positions in // buffer is 32-bit values, we swap the corresponding buffer positions in
// 128-bit chunks. // 128-bit chunks.
size_t dst = kBufferSize; size_t dst = kBufferSize;
......
...@@ -203,7 +203,7 @@ struct FloatTraits<float> { ...@@ -203,7 +203,7 @@ struct FloatTraits<float> {
if (mantissa > kMantissaMask) { if (mantissa > kMantissaMask) {
// Normal value. // Normal value.
// Adjust by 127 for the exponent representation bias, and an additional // Adjust by 127 for the exponent representation bias, and an additional
// 23 due to the implied decimal point in the IEEE mantissa represenation. // 23 due to the implied decimal point in the IEEE mantissa representation.
flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1) flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
<< 23; << 23;
mantissa &= kMantissaMask; mantissa &= kMantissaMask;
...@@ -462,7 +462,7 @@ uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact, ...@@ -462,7 +462,7 @@ uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
// the low bit of `value` is set. // the low bit of `value` is set.
// //
// In inexact mode, the nonzero error means the actual value is greater // In inexact mode, the nonzero error means the actual value is greater
// than the halfway point and we must alway round up. // than the halfway point and we must always round up.
if ((value & 1) == 1 || !input_exact) { if ((value & 1) == 1 || !input_exact) {
++value; ++value;
} }
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
namespace absl { namespace absl {
ABSL_NAMESPACE_BEGIN ABSL_NAMESPACE_BEGIN
// Workalike compatibilty version of std::chars_format from C++17. // Workalike compatibility version of std::chars_format from C++17.
// //
// This is a bitfield enumerator which can be passed to absl::from_chars to // This is a bitfield enumerator which can be passed to absl::from_chars to
// configure the string-to-float conversion. // configure the string-to-float conversion.
...@@ -48,7 +48,7 @@ struct from_chars_result { ...@@ -48,7 +48,7 @@ struct from_chars_result {
std::errc ec; std::errc ec;
}; };
// Workalike compatibilty version of std::from_chars from C++17. Currently // Workalike compatibility version of std::from_chars from C++17. Currently
// this only supports the `double` and `float` types. // this only supports the `double` and `float` types.
// //
// This interface incorporates the proposed resolutions for library issues // This interface incorporates the proposed resolutions for library issues
......
...@@ -794,7 +794,7 @@ int CompareChunks(absl::string_view* lhs, absl::string_view* rhs, ...@@ -794,7 +794,7 @@ int CompareChunks(absl::string_view* lhs, absl::string_view* rhs,
} }
// This overload set computes comparison results from memcmp result. This // This overload set computes comparison results from memcmp result. This
// interface is used inside GenericCompare below. Differet implementations // interface is used inside GenericCompare below. Different implementations
// are specialized for int and bool. For int we clamp result to {-1, 0, 1} // are specialized for int and bool. For int we clamp result to {-1, 0, 1}
// set. For bool we are just interested in "value == 0". // set. For bool we are just interested in "value == 0".
template <typename ResultType> template <typename ResultType>
......
...@@ -661,7 +661,7 @@ class Cord { ...@@ -661,7 +661,7 @@ class Cord {
class CharRange { class CharRange {
public: public:
// Fulfill minimum c++ container requirements [container.requirements] // Fulfill minimum c++ container requirements [container.requirements]
// Theses (partial) container type definitions allow CharRange to be used // These (partial) container type definitions allow CharRange to be used
// in various utilities expecting a subset of [container.requirements]. // in various utilities expecting a subset of [container.requirements].
// For example, the below enables using `::testing::ElementsAre(...)` // For example, the below enables using `::testing::ElementsAre(...)`
using value_type = char; using value_type = char;
......
...@@ -51,7 +51,7 @@ enum class TestCordSize { ...@@ -51,7 +51,7 @@ enum class TestCordSize {
// existing inputs rather than copying contents of the input. // existing inputs rather than copying contents of the input.
kMedium = cord_internal::kMaxFlatLength / 2 + 1, kMedium = cord_internal::kMaxFlatLength / 2 + 1,
// A string value large enough to cause it to be stored in mutliple flats. // A string value large enough to cause it to be stored in multiple flats.
kLarge = cord_internal::kMaxFlatLength * 4 kLarge = cord_internal::kMaxFlatLength * 4
}; };
......
...@@ -92,7 +92,7 @@ class BigUnsigned { ...@@ -92,7 +92,7 @@ class BigUnsigned {
// numbers with this many decimal digits or fewer are representable by this // numbers with this many decimal digits or fewer are representable by this
// type. // type.
// //
// Analagous to std::numeric_limits<BigUnsigned>::digits10. // Analogous to std::numeric_limits<BigUnsigned>::digits10.
static constexpr int Digits10() { static constexpr int Digits10() {
// 9975007/1035508 is very slightly less than log10(2**32). // 9975007/1035508 is very slightly less than log10(2**32).
return static_cast<uint64_t>(max_words) * 9975007 / 1035508; return static_cast<uint64_t>(max_words) * 9975007 / 1035508;
......
...@@ -507,7 +507,7 @@ TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) { ...@@ -507,7 +507,7 @@ TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) {
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) { for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
// Ref top level tree based on param. // Ref top level tree based on param.
// Ref child node once every 16 iterations, and leaf node every 4 // Ref child node once every 16 iterations, and leaf node every 4
// iterrations which which should not have an observable effect other than // iterations which should not have an observable effect other than
// the node and/or the leaf below it being copied. // the node and/or the leaf below it being copied.
refs.RefIf(shared(), tree); refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back()); refs.RefIf(i % 16 == 0, tree->Edges().back());
...@@ -568,7 +568,7 @@ TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) { ...@@ -568,7 +568,7 @@ TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) {
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) { for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
// Ref top level tree based on param. // Ref top level tree based on param.
// Ref child node once every 16 iterations, and leaf node every 4 // Ref child node once every 16 iterations, and leaf node every 4
// iterrations which which should not have an observable effect other than // iterations which should not have an observable effect other than
// the node and/or the leaf below it being copied. // the node and/or the leaf below it being copied.
refs.RefIf(shared(), tree); refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back()); refs.RefIf(i % 16 == 0, tree->Edges().back());
......
...@@ -472,7 +472,7 @@ class CordRepRing : public CordRep { ...@@ -472,7 +472,7 @@ class CordRepRing : public CordRep {
// Increases the data offset for entry `index` by `n`. // Increases the data offset for entry `index` by `n`.
void AddDataOffset(index_type index, size_t n); void AddDataOffset(index_type index, size_t n);
// Descreases the length for entry `index` by `n`. // Decreases the length for entry `index` by `n`.
void SubLength(index_type index, size_t n); void SubLength(index_type index, size_t n);
index_type head_; index_type head_;
......
...@@ -38,7 +38,7 @@ TEST(CordzFunctionsTest, SampleRate) { ...@@ -38,7 +38,7 @@ TEST(CordzFunctionsTest, SampleRate) {
} }
// Cordz is disabled when we don't have thread_local. All calls to // Cordz is disabled when we don't have thread_local. All calls to
// should_profile will return false when cordz is diabled, so we might want to // should_profile will return false when cordz is disabled, so we might want to
// avoid those tests. // avoid those tests.
#ifdef ABSL_INTERNAL_CORDZ_ENABLED #ifdef ABSL_INTERNAL_CORDZ_ENABLED
......
...@@ -54,7 +54,7 @@ namespace { ...@@ -54,7 +54,7 @@ namespace {
// The top level node is treated specially: we assume the current thread // The top level node is treated specially: we assume the current thread
// (typically called from the CordzHandler) to hold a reference purely to // (typically called from the CordzHandler) to hold a reference purely to
// perform a safe analysis, and not being part of the application. So we // perform a safe analysis, and not being part of the application. So we
// substract 1 from the reference count of the top node to compute the // subtract 1 from the reference count of the top node to compute the
// 'application fair share' excluding the reference of the current thread. // 'application fair share' excluding the reference of the current thread.
// //
// An example of fair sharing, and why we multiply reference counts: // An example of fair sharing, and why we multiply reference counts:
......
...@@ -33,7 +33,7 @@ namespace cord_internal { ...@@ -33,7 +33,7 @@ namespace cord_internal {
// ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail // ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail
// //
// This list tracks that CH1 and CH2 were created after ST1, so the thread // This list tracks that CH1 and CH2 were created after ST1, so the thread
// holding ST1 might have a referece to CH1, CH2, ST2, and CH3. However, ST2 was // holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2 was
// created later, so the thread holding the ST2 token cannot have a reference to // created later, so the thread holding the ST2 token cannot have a reference to
// ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will delete ST1, // ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will delete ST1,
// CH1, and CH2. If instead ST2 is cleaned up first, that thread will only // CH1, and CH2. If instead ST2 is cleaned up first, that thread will only
......
...@@ -54,7 +54,7 @@ TEST(Distance, TestDistances) { ...@@ -54,7 +54,7 @@ TEST(Distance, TestDistances) {
} }
TEST(Distance, TestCutoff) { TEST(Distance, TestCutoff) {
// Returing cutoff + 1 if the value is larger than cutoff or string longer // Returning cutoff + 1 if the value is larger than cutoff or string longer
// than MAX_SIZE. // than MAX_SIZE.
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3}); EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3}); EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3});
......
...@@ -711,7 +711,7 @@ bool IncrementNibble(size_t nibble_index, Int* n) { ...@@ -711,7 +711,7 @@ bool IncrementNibble(size_t nibble_index, Int* n) {
constexpr size_t kShift = sizeof(Int) * 8 - 1; constexpr size_t kShift = sizeof(Int) * 8 - 1;
constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4; constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4;
Int before = *n >> kShift; Int before = *n >> kShift;
// Here we essentially want to take the number 1 and move it into the requsted // Here we essentially want to take the number 1 and move it into the requested
// nibble, then add it to *n to effectively increment the nibble. However, // nibble, then add it to *n to effectively increment the nibble. However,
// ASan will complain if we try to shift the 1 beyond the limits of the Int, // ASan will complain if we try to shift the 1 beyond the limits of the Int,
// i.e., if the nibble_index is out of range. So therefore we check for this // i.e., if the nibble_index is out of range. So therefore we check for this
......
...@@ -219,7 +219,7 @@ char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) { ...@@ -219,7 +219,7 @@ char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
if (i < 0) { if (i < 0) {
*buffer++ = '-'; *buffer++ = '-';
// We need to do the negation in modular (i.e., "unsigned") // We need to do the negation in modular (i.e., "unsigned")
// arithmetic; MSVC++ apprently warns for plain "-u", so // arithmetic; MSVC++ apparently warns for plain "-u", so
// we write the equivalent expression "0 - u" instead. // we write the equivalent expression "0 - u" instead.
u = 0 - u; u = 0 - u;
} }
......
...@@ -104,7 +104,7 @@ struct PosixTransition { ...@@ -104,7 +104,7 @@ struct PosixTransition {
// The entirety of a POSIX-string specified time-zone rule. The standard // The entirety of a POSIX-string specified time-zone rule. The standard
// abbreviation and offset are always given. If the time zone includes // abbreviation and offset are always given. If the time zone includes
// daylight saving, then the daylight abbrevation is non-empty and the // daylight saving, then the daylight abbreviation is non-empty and the
// remaining fields are also valid. Note that the start/end transitions // remaining fields are also valid. Note that the start/end transitions
// are not ordered---in the southern hemisphere the transition to end // are not ordered---in the southern hemisphere the transition to end
// daylight time occurs first in any particular year. // daylight time occurs first in any particular year.
......
...@@ -125,7 +125,7 @@ struct IsView< ...@@ -125,7 +125,7 @@ struct IsView<
}; };
// These enablers result in 'int' so they can be used as typenames or defaults // These enablers result in 'int' so they can be used as typenames or defaults
// in template paramters lists. // in template parameters lists.
template <typename T> template <typename T>
using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>; using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>;
......
...@@ -33,7 +33,7 @@ def ReplaceStringsInFile(filename, replacement_dict): ...@@ -33,7 +33,7 @@ def ReplaceStringsInFile(filename, replacement_dict):
values values
Raises: Raises:
Exception: A failure occured Exception: A failure occurred
""" """
f = open(filename, 'r') f = open(filename, 'r')
content = f.read() content = f.read()
...@@ -62,7 +62,7 @@ def StripContentBetweenTags(filename, strip_begin_tag, strip_end_tag): ...@@ -62,7 +62,7 @@ def StripContentBetweenTags(filename, strip_begin_tag, strip_end_tag):
strip_end_tag: the end of the content to be removed strip_end_tag: the end of the content to be removed
Raises: Raises:
Exception: A failure occured Exception: A failure occurred
""" """
f = open(filename, 'r') f = open(filename, 'r')
content = f.read() content = f.read()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment