Commit fa108c44 by Derek Mauro, committed by Copybara-Service

Rollback of the fix for "unsafe narrowing" warnings in absl, 8/n.

Addresses failures with the following, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in */internal/.)

Bug: chromium:1292951
PiperOrigin-RevId: 471561809
Change-Id: I7abd6d83706f5ca135f1ce3458192a498a6280b9
parent 847fa56a
...@@ -97,8 +97,7 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, ...@@ -97,8 +97,7 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
#ifdef __BIONIC__ #ifdef __BIONIC__
// SYS_mmap2 has problems on Android API level <= 16. // SYS_mmap2 has problems on Android API level <= 16.
// Workaround by invoking __mmap2() instead. // Workaround by invoking __mmap2() instead.
return __mmap2(start, length, prot, flags, fd, return __mmap2(start, length, prot, flags, fd, offset / pagesize);
static_cast<size_t>(offset / pagesize));
#else #else
return reinterpret_cast<void*>( return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd, syscall(SYS_mmap2, start, length, prot, flags, fd,
......
...@@ -332,7 +332,7 @@ size_t GetPageSize() { ...@@ -332,7 +332,7 @@ size_t GetPageSize() {
#elif defined(__wasm__) || defined(__asmjs__) #elif defined(__wasm__) || defined(__asmjs__)
return getpagesize(); return getpagesize();
#else #else
return static_cast<size_t>(sysconf(_SC_PAGESIZE)); return sysconf(_SC_PAGESIZE);
#endif #endif
} }
......
...@@ -136,7 +136,7 @@ static int GetNumCPUs() { ...@@ -136,7 +136,7 @@ static int GetNumCPUs() {
// Other possibilities: // Other possibilities:
// - Read /sys/devices/system/cpu/online and use cpumask_parse() // - Read /sys/devices/system/cpu/online and use cpumask_parse()
// - sysconf(_SC_NPROCESSORS_ONLN) // - sysconf(_SC_NPROCESSORS_ONLN)
return static_cast<int>(std::thread::hardware_concurrency()); return std::thread::hardware_concurrency();
#endif #endif
} }
...@@ -194,7 +194,7 @@ static bool ReadLongFromFile(const char *file, long *value) { ...@@ -194,7 +194,7 @@ static bool ReadLongFromFile(const char *file, long *value) {
char line[1024]; char line[1024];
char *err; char *err;
memset(line, '\0', sizeof(line)); memset(line, '\0', sizeof(line));
ssize_t len = read(fd, line, sizeof(line) - 1); int len = read(fd, line, sizeof(line) - 1);
if (len <= 0) { if (len <= 0) {
ret = false; ret = false;
} else { } else {
...@@ -376,7 +376,7 @@ pid_t GetTID() { ...@@ -376,7 +376,7 @@ pid_t GetTID() {
#endif #endif
pid_t GetTID() { pid_t GetTID() {
return static_cast<pid_t>(syscall(SYS_gettid)); return syscall(SYS_gettid);
} }
#elif defined(__akaros__) #elif defined(__akaros__)
...@@ -429,11 +429,11 @@ static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. ...@@ -429,11 +429,11 @@ static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
// Returns the TID to tid_array. // Returns the TID to tid_array.
static void FreeTID(void *v) { static void FreeTID(void *v) {
intptr_t tid = reinterpret_cast<intptr_t>(v); intptr_t tid = reinterpret_cast<intptr_t>(v);
intptr_t word = tid / kBitsPerWord; int word = tid / kBitsPerWord;
uint32_t mask = ~(1u << (tid % kBitsPerWord)); uint32_t mask = ~(1u << (tid % kBitsPerWord));
absl::base_internal::SpinLockHolder lock(&tid_lock); absl::base_internal::SpinLockHolder lock(&tid_lock);
assert(0 <= word && static_cast<size_t>(word) < tid_array->size()); assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
(*tid_array)[static_cast<size_t>(word)] &= mask; (*tid_array)[word] &= mask;
} }
static void InitGetTID() { static void InitGetTID() {
...@@ -455,7 +455,7 @@ pid_t GetTID() { ...@@ -455,7 +455,7 @@ pid_t GetTID() {
intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key)); intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
if (tid != 0) { if (tid != 0) {
return static_cast<pid_t>(tid); return tid;
} }
int bit; // tid_array[word] = 1u << bit; int bit; // tid_array[word] = 1u << bit;
...@@ -476,8 +476,7 @@ pid_t GetTID() { ...@@ -476,8 +476,7 @@ pid_t GetTID() {
while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) { while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
++bit; ++bit;
} }
tid = tid = (word * kBitsPerWord) + bit;
static_cast<intptr_t>((word * kBitsPerWord) + static_cast<size_t>(bit));
(*tid_array)[word] |= 1u << bit; // Mark the TID as allocated. (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
} }
......
...@@ -641,8 +641,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values, ...@@ -641,8 +641,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
SizeType<A> insert_count) -> Iterator<A> { SizeType<A> insert_count) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView(); StorageView<A> storage_view = MakeStorageView();
auto insert_index = static_cast<SizeType<A>>( SizeType<A> insert_index =
std::distance(ConstIterator<A>(storage_view.data), pos)); std::distance(ConstIterator<A>(storage_view.data), pos);
SizeType<A> insert_end_index = insert_index + insert_count; SizeType<A> insert_end_index = insert_index + insert_count;
SizeType<A> new_size = storage_view.size + insert_count; SizeType<A> new_size = storage_view.size + insert_count;
......
...@@ -612,9 +612,9 @@ struct GroupAArch64Impl { ...@@ -612,9 +612,9 @@ struct GroupAArch64Impl {
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const { NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
uint64_t mask = uint64_t mask =
vget_lane_u64(vreinterpret_u64_u8(vceq_s8( vget_lane_u64(vreinterpret_u64_u8(
vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)), vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
vreinterpret_s8_u8(ctrl))), vreinterpret_s8_u8(ctrl))),
0); 0);
return NonIterableBitMask<uint64_t, kWidth, 3>(mask); return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
} }
...@@ -1144,12 +1144,11 @@ class raw_hash_set { ...@@ -1144,12 +1144,11 @@ class raw_hash_set {
std::is_nothrow_default_constructible<key_equal>::value&& std::is_nothrow_default_constructible<key_equal>::value&&
std::is_nothrow_default_constructible<allocator_type>::value) {} std::is_nothrow_default_constructible<allocator_type>::value) {}
explicit raw_hash_set(size_t bucket_count, explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const hasher& hash = hasher(),
const key_equal& eq = key_equal(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type()) const allocator_type& alloc = allocator_type())
: ctrl_(EmptyGroup()), : ctrl_(EmptyGroup()),
settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) { settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) { if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count); capacity_ = NormalizeCapacity(bucket_count);
initialize_slots(); initialize_slots();
...@@ -1274,16 +1273,14 @@ class raw_hash_set { ...@@ -1274,16 +1273,14 @@ class raw_hash_set {
std::is_nothrow_copy_constructible<allocator_type>::value) std::is_nothrow_copy_constructible<allocator_type>::value)
: ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
slots_(absl::exchange(that.slots_, nullptr)), slots_(absl::exchange(that.slots_, nullptr)),
size_(absl::exchange(that.size_, size_t{0})), size_(absl::exchange(that.size_, 0)),
capacity_(absl::exchange(that.capacity_, size_t{0})), capacity_(absl::exchange(that.capacity_, 0)),
// Hash, equality and allocator are copied instead of moved because // Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it // `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called. // would create a nullptr functor that cannot be called.
settings_(absl::exchange(that.growth_left(), size_t{0}), settings_(absl::exchange(that.growth_left(), 0),
absl::exchange(that.infoz(), HashtablezInfoHandle()), absl::exchange(that.infoz(), HashtablezInfoHandle()),
that.hash_ref(), that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
that.eq_ref(),
that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a) raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()), : ctrl_(EmptyGroup()),
......
...@@ -406,7 +406,7 @@ template <typename StorageT> ...@@ -406,7 +406,7 @@ template <typename StorageT>
StorageT* FlagImpl::OffsetValue() const { StorageT* FlagImpl::OffsetValue() const {
char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this)); char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this));
// The offset is deduced via Flag value type specific op_. // The offset is deduced via Flag value type specific op_.
ptrdiff_t offset = flags_internal::ValueOffset(op_); size_t offset = flags_internal::ValueOffset(op_);
return reinterpret_cast<StorageT*>(p + offset); return reinterpret_cast<StorageT*>(p + offset);
} }
...@@ -486,7 +486,7 @@ bool FlagImpl::ReadOneBool() const { ...@@ -486,7 +486,7 @@ bool FlagImpl::ReadOneBool() const {
} }
void FlagImpl::ReadSequenceLockedData(void* dst) const { void FlagImpl::ReadSequenceLockedData(void* dst) const {
size_t size = Sizeof(op_); int size = Sizeof(op_);
// Attempt to read using the sequence lock. // Attempt to read using the sequence lock.
if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) { if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
return; return;
......
...@@ -148,7 +148,8 @@ class FlagHelpPrettyPrinter { ...@@ -148,7 +148,8 @@ class FlagHelpPrettyPrinter {
} }
// Write the token, ending the string first if necessary/possible. // Write the token, ending the string first if necessary/possible.
if (!new_line && (line_len_ + token.size() >= max_line_len_)) { if (!new_line &&
(line_len_ + static_cast<int>(token.size()) >= max_line_len_)) {
EndLine(); EndLine();
new_line = true; new_line = true;
} }
......
...@@ -94,14 +94,13 @@ double AndersonDarlingPValue(int n, double z) { ...@@ -94,14 +94,13 @@ double AndersonDarlingPValue(int n, double z) {
} }
double AndersonDarlingStatistic(const std::vector<double>& random_sample) { double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
size_t n = random_sample.size(); int n = random_sample.size();
double ad_sum = 0; double ad_sum = 0;
for (size_t i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
ad_sum += (2 * i + 1) * ad_sum += (2 * i + 1) *
std::log(random_sample[i] * (1 - random_sample[n - 1 - i])); std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
} }
const auto n_as_double = static_cast<double>(n); double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
double ad_statistic = -n_as_double - 1 / n_as_double * ad_sum;
return ad_statistic; return ad_statistic;
} }
...@@ -112,15 +111,14 @@ double AndersonDarlingStatistic(const std::vector<double>& random_sample) { ...@@ -112,15 +111,14 @@ double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
// Marsaglia and Marsaglia for details. // Marsaglia and Marsaglia for details.
double AndersonDarlingTest(const std::vector<double>& random_sample) { double AndersonDarlingTest(const std::vector<double>& random_sample) {
double ad_statistic = AndersonDarlingStatistic(random_sample); double ad_statistic = AndersonDarlingStatistic(random_sample);
double p = AndersonDarlingPValue(static_cast<int>(random_sample.size()), double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
ad_statistic);
return p; return p;
} }
TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) { TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
ExponentialBiased eb; ExponentialBiased eb;
for (int runs = 0; runs < 10; ++runs) { for (int runs = 0; runs < 10; ++runs) {
for (int64_t flips = eb.GetSkipCount(1); flips > 0; --flips) { for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
printf("head..."); printf("head...");
} }
printf("tail\n"); printf("tail\n");
...@@ -134,7 +132,7 @@ TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) { ...@@ -134,7 +132,7 @@ TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
TEST(ExponentialBiasedTest, SampleDemoWithStride) { TEST(ExponentialBiasedTest, SampleDemoWithStride) {
ExponentialBiased eb; ExponentialBiased eb;
int64_t stride = eb.GetStride(10); int stride = eb.GetStride(10);
int samples = 0; int samples = 0;
for (int i = 0; i < 10000000; ++i) { for (int i = 0; i < 10000000; ++i) {
if (--stride == 0) { if (--stride == 0) {
...@@ -149,7 +147,7 @@ TEST(ExponentialBiasedTest, SampleDemoWithStride) { ...@@ -149,7 +147,7 @@ TEST(ExponentialBiasedTest, SampleDemoWithStride) {
// Testing that NextRandom generates uniform random numbers. Applies the // Testing that NextRandom generates uniform random numbers. Applies the
// Anderson-Darling test for uniformity // Anderson-Darling test for uniformity
TEST(ExponentialBiasedTest, TestNextRandom) { TEST(ExponentialBiasedTest, TestNextRandom) {
for (auto n : std::vector<size_t>({ for (auto n : std::vector<int>({
10, // Check short-range correlation 10, // Check short-range correlation
100, 1000, 100, 1000,
10000 // Make sure there's no systemic error 10000 // Make sure there's no systemic error
...@@ -163,7 +161,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) { ...@@ -163,7 +161,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
} }
std::vector<uint64_t> int_random_sample(n); std::vector<uint64_t> int_random_sample(n);
// Collect samples // Collect samples
for (size_t i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
int_random_sample[i] = x; int_random_sample[i] = x;
x = ExponentialBiased::NextRandom(x); x = ExponentialBiased::NextRandom(x);
} }
...@@ -171,7 +169,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) { ...@@ -171,7 +169,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
std::sort(int_random_sample.begin(), int_random_sample.end()); std::sort(int_random_sample.begin(), int_random_sample.end());
std::vector<double> random_sample(n); std::vector<double> random_sample(n);
// Convert them to uniform randoms (in the range [0,1]) // Convert them to uniform randoms (in the range [0,1])
for (size_t i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
random_sample[i] = random_sample[i] =
static_cast<double>(int_random_sample[i]) / max_prng_value; static_cast<double>(int_random_sample[i]) / max_prng_value;
} }
......
...@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) { ...@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
} }
while (success && buffer_size > 0) { while (success && buffer_size > 0) {
ssize_t bytes_read = read(dev_urandom, buffer, buffer_size); int bytes_read = read(dev_urandom, buffer, buffer_size);
int read_error = errno; int read_error = errno;
success = (bytes_read > 0); success = (bytes_read > 0);
if (success) { if (success) {
buffer += bytes_read; buffer += bytes_read;
buffer_size -= static_cast<size_t>(bytes_read); buffer_size -= bytes_read;
} else if (bytes_read == -1 && read_error == EINTR) { } else if (bytes_read == -1 && read_error == EINTR) {
success = true; // Need to try again. success = true; // Need to try again.
} }
......
...@@ -87,7 +87,7 @@ class FutexImpl { ...@@ -87,7 +87,7 @@ class FutexImpl {
public: public:
static int WaitUntil(std::atomic<int32_t> *v, int32_t val, static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
KernelTimeout t) { KernelTimeout t) {
long err = 0; // NOLINT(runtime/int) int err = 0;
if (t.has_timeout()) { if (t.has_timeout()) {
// https://locklessinc.com/articles/futex_cheat_sheet/ // https://locklessinc.com/articles/futex_cheat_sheet/
// Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time. // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
...@@ -105,44 +105,41 @@ class FutexImpl { ...@@ -105,44 +105,41 @@ class FutexImpl {
FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr); FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
} }
if (ABSL_PREDICT_FALSE(err != 0)) { if (ABSL_PREDICT_FALSE(err != 0)) {
return -errno; err = -errno;
} }
return 0; return err;
} }
static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val, static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
int32_t bits, int32_t bits,
const struct timespec *abstime) { const struct timespec *abstime) {
// NOLINTNEXTLINE(runtime/int) int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime, nullptr, bits);
nullptr, bits);
if (ABSL_PREDICT_FALSE(err != 0)) { if (ABSL_PREDICT_FALSE(err != 0)) {
return -errno; err = -errno;
} }
return 0; return err;
} }
static int Wake(std::atomic<int32_t> *v, int32_t count) { static int Wake(std::atomic<int32_t> *v, int32_t count) {
// NOLINTNEXTLINE(runtime/int) int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) { if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno; err = -errno;
} }
return 0; return err;
} }
// FUTEX_WAKE_BITSET // FUTEX_WAKE_BITSET
static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) { static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
// NOLINTNEXTLINE(runtime/int) int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr, nullptr, bits);
nullptr, bits);
if (ABSL_PREDICT_FALSE(err < 0)) { if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno; err = -errno;
} }
return 0; return err;
} }
}; };
......
...@@ -84,15 +84,14 @@ class TestZoneInfoSource : public cctz::ZoneInfoSource { ...@@ -84,15 +84,14 @@ class TestZoneInfoSource : public cctz::ZoneInfoSource {
: data_(data), end_(data + size) {} : data_(data), end_(data + size) {}
std::size_t Read(void* ptr, std::size_t size) override { std::size_t Read(void* ptr, std::size_t size) override {
const std::size_t len = const std::size_t len = std::min<std::size_t>(size, end_ - data_);
std::min(size, static_cast<std::size_t>(end_ - data_));
memcpy(ptr, data_, len); memcpy(ptr, data_, len);
data_ += len; data_ += len;
return len; return len;
} }
int Skip(std::size_t offset) override { int Skip(std::size_t offset) override {
data_ += std::min(offset, static_cast<std::size_t>(end_ - data_)); data_ += std::min<std::size_t>(offset, end_ - data_);
return 0; return 0;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment