Commit 3e59efa2 by Shahriar Rouf Committed by Copybara-Service

Optimize `absl::Hash` by making `LowLevelHash` faster.

Throughput of the 64-byte chunk loop inside `LowLevelHash` (or now in `LowLevelHashLenGt16`) is limited by the loop-carried dependency on `current_state`. By using 4 states instead of 2, we can shorten this dependency chain by 1 cycle. On Skylake, it is reduced from 9 cycles to 8 cycles (12.5% faster asymptotically).

To see the reduction in a simplified version of `LowLevelHash` implementation on Skylake:
* Before: https://godbolt.org/z/Tcj9vsGax, llvm-mca (https://godbolt.org/z/3o78Msr63) shows 9 cycles / iteration.
* After: https://godbolt.org/z/q4GM4EjPr, llvm-mca (https://godbolt.org/z/W5d1KEMzq) shows 8 cycles / iteration.
* This CL removes one xor (1 cycle) per iteration from the critical path.

A block handling a 32-byte chunk is also added.

Finally, just before returning, `Mix` is called once instead of twice.

PiperOrigin-RevId: 605090653
Change-Id: Ib7517ebb8bef7484066cd14cf41a943953e93377
parent f4c713f5
...@@ -61,7 +61,7 @@ constexpr uint64_t kHashSalt[5] = { ...@@ -61,7 +61,7 @@ constexpr uint64_t kHashSalt[5] = {
uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data, uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
size_t len) { size_t len) {
return LowLevelHash(data, len, Seed(), kHashSalt); return LowLevelHashLenGt16(data, len, Seed(), kHashSalt);
} }
} // namespace hash_internal } // namespace hash_internal
......
...@@ -14,6 +14,9 @@ ...@@ -14,6 +14,9 @@
#include "absl/hash/internal/low_level_hash.h" #include "absl/hash/internal/low_level_hash.h"
#include <cstddef>
#include <cstdint>
#include "absl/base/internal/unaligned_access.h" #include "absl/base/internal/unaligned_access.h"
#include "absl/base/prefetch.h" #include "absl/base/prefetch.h"
#include "absl/numeric/int128.h" #include "absl/numeric/int128.h"
...@@ -28,7 +31,7 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) { ...@@ -28,7 +31,7 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
return absl::Uint128Low64(p) ^ absl::Uint128High64(p); return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
} }
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]) { const uint64_t salt[5]) {
// Prefetch the cacheline that data resides in. // Prefetch the cacheline that data resides in.
PrefetchToLocalCache(data); PrefetchToLocalCache(data);
...@@ -40,7 +43,9 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, ...@@ -40,7 +43,9 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
// If we have more than 64 bytes, we're going to handle chunks of 64 // If we have more than 64 bytes, we're going to handle chunks of 64
// bytes at a time. We're going to build up two separate hash states // bytes at a time. We're going to build up two separate hash states
// which we will then hash together. // which we will then hash together.
uint64_t duplicated_state = current_state; uint64_t duplicated_state0 = current_state;
uint64_t duplicated_state1 = current_state;
uint64_t duplicated_state2 = current_state;
do { do {
// Always prefetch the next cacheline. // Always prefetch the next cacheline.
...@@ -55,24 +60,39 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, ...@@ -55,24 +60,39 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48); uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56); uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state); current_state = Mix(a ^ salt[1], b ^ current_state);
uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state); duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
current_state = (cs0 ^ cs1);
uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state); duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state); duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
duplicated_state = (ds0 ^ ds1);
ptr += 64; ptr += 64;
len -= 64; len -= 64;
} while (len > 64); } while (len > 64);
current_state = current_state ^ duplicated_state; current_state = (current_state ^ duplicated_state0) ^
(duplicated_state1 + duplicated_state2);
} }
// We now have a data `ptr` with at most 64 bytes and the current state // We now have a data `ptr` with at most 64 bytes and the current state
// of the hashing state machine stored in current_state. // of the hashing state machine stored in current_state.
while (len > 16) { if (len > 32) {
uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
current_state = cs0 ^ cs1;
ptr += 32;
len -= 32;
}
// We now have a data `ptr` with at most 32 bytes and the current state
// of the hashing state machine stored in current_state.
if (len > 16) {
uint64_t a = absl::base_internal::UnalignedLoad64(ptr); uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8); uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
...@@ -82,13 +102,33 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, ...@@ -82,13 +102,33 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
len -= 16; len -= 16;
} }
// We now have a data `ptr` with at most 16 bytes. // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
// safely read from `ptr + len - 16`.
uint64_t a = absl::base_internal::UnalignedLoad64(ptr + len - 16);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
}
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]) {
if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
// Prefetch the cacheline that data resides in.
PrefetchToLocalCache(data);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
uint64_t starting_length = static_cast<uint64_t>(len);
uint64_t current_state = seed ^ salt[0];
if (len == 0) return current_state;
uint64_t a = 0; uint64_t a = 0;
uint64_t b = 0; uint64_t b = 0;
// We now have a data `ptr` with at least 1 and at most 16 bytes.
if (len > 8) { if (len > 8) {
// When we have at least 9 and at most 16 bytes, set A to the first 64 // When we have at least 9 and at most 16 bytes, set A to the first 64
// bits of the input and B to the last 64 bits of the input. Yes, they will // bits of the input and B to the last 64 bits of the input. Yes, they
// overlap in the middle if we are working with less than the full 16 // will overlap in the middle if we are working with less than the full 16
// bytes. // bytes.
a = absl::base_internal::UnalignedLoad64(ptr); a = absl::base_internal::UnalignedLoad64(ptr);
b = absl::base_internal::UnalignedLoad64(ptr + len - 8); b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
...@@ -97,20 +137,14 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, ...@@ -97,20 +137,14 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
// bits and B to the last 32 bits. // bits and B to the last 32 bits.
a = absl::base_internal::UnalignedLoad32(ptr); a = absl::base_internal::UnalignedLoad32(ptr);
b = absl::base_internal::UnalignedLoad32(ptr + len - 4); b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
} else if (len > 0) {
// If we have at least 1 and at most 3 bytes, read all of the provided
// bits into A, with some adjustments.
a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
ptr[len - 1]);
b = 0;
} else { } else {
a = 0; // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
b = 0; // other byte into B, with some adjustments.
a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
b = static_cast<uint64_t>(ptr[len >> 1]);
} }
uint64_t w = Mix(a ^ salt[1], b ^ current_state); return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
uint64_t z = salt[1] ^ starting_length;
return Mix(w, z);
} }
} // namespace hash_internal } // namespace hash_internal
......
...@@ -43,6 +43,10 @@ namespace hash_internal { ...@@ -43,6 +43,10 @@ namespace hash_internal {
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]); const uint64_t salt[5]);
// Same as above except the length must be greater than 16.
uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]);
} // namespace hash_internal } // namespace hash_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
} // namespace absl } // namespace absl
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment