Commit 48cd2c3f authored by Abseil Team, committed by Derek Mauro

Export of internal Abseil changes.

--
4eacae3ff1b14b1d309e8092185bc10e8a6203cf by Derek Mauro <dmauro@google.com>:

Release SwissTable - a fast, efficient, cache-friendly hash table.

https://www.youtube.com/watch?v=ncHmEUmJZf4

PiperOrigin-RevId: 214816527

--
df8c3dfab3cfb2f4365909a84d0683b193cfbb11 by Derek Mauro <dmauro@google.com>:

Internal change

PiperOrigin-RevId: 214785288

--
1eabd5266bbcebc33eecc91e5309b751856a75c8 by Abseil Team <absl-team@google.com>:

Internal change

PiperOrigin-RevId: 214722931

--
2ebbfac950f83146b46253038e7dd7dcde9f2951 by Derek Mauro <dmauro@google.com>:

Internal change

PiperOrigin-RevId: 214701684
GitOrigin-RevId: 4eacae3ff1b14b1d309e8092185bc10e8a6203cf
Change-Id: I9ba64e395b22ad7863213d157b8019b082adc19d
parent e291c279
@@ -20,6 +20,7 @@ add_subdirectory(base)
add_subdirectory(algorithm)
add_subdirectory(container)
add_subdirectory(debugging)
add_subdirectory(hash)
add_subdirectory(memory)
add_subdirectory(meta)
add_subdirectory(numeric)
...
@@ -185,3 +185,459 @@ cc_test(
"@com_google_googletest//:gtest_main",
],
)
NOTEST_TAGS_NONMOBILE = [
"no_test_darwin_x86_64",
"no_test_loonix",
]
NOTEST_TAGS_MOBILE = [
"no_test_android_arm",
"no_test_android_arm64",
"no_test_android_x86",
"no_test_ios_x86_64",
]
NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
cc_library(
name = "flat_hash_map",
hdrs = ["flat_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":container_memory",
":hash_function_defaults",
":raw_hash_map",
"//absl/memory",
],
)
cc_test(
name = "flat_hash_map_test",
srcs = ["flat_hash_map_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"],
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_map",
":hash_generator_testing",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_modifiers_test",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "flat_hash_set",
hdrs = ["flat_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":container_memory",
":hash_function_defaults",
":raw_hash_set",
"//absl/base:core_headers",
"//absl/memory",
],
)
cc_test(
name = "flat_hash_set_test",
srcs = ["flat_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_set",
":hash_generator_testing",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_modifiers_test",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "node_hash_map",
hdrs = ["node_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":container_memory",
":hash_function_defaults",
":node_hash_policy",
":raw_hash_map",
"//absl/memory",
],
)
cc_test(
name = "node_hash_map_test",
srcs = ["node_hash_map_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"],
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":hash_generator_testing",
":node_hash_map",
":tracked",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "node_hash_set",
hdrs = ["node_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":container_memory",
":hash_function_defaults",
":node_hash_policy",
":raw_hash_set",
"//absl/memory",
],
)
cc_test(
name = "node_hash_set_test",
srcs = ["node_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":hash_generator_testing",
":node_hash_set",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "container_memory",
hdrs = ["internal/container_memory.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
"//absl/memory",
"//absl/utility",
],
)
cc_test(
name = "container_memory_test",
srcs = ["internal/container_memory_test.cc"],
copts = ABSL_TEST_COPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":container_memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_function_defaults",
hdrs = ["internal/hash_function_defaults.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
"//absl/base:config",
"//absl/hash",
"//absl/strings",
],
)
cc_test(
name = "hash_function_defaults_test",
srcs = ["internal/hash_function_defaults_test.cc"],
copts = ABSL_TEST_COPTS,
tags = NOTEST_TAGS,
deps = [
":hash_function_defaults",
"//absl/hash",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_generator_testing",
testonly = 1,
srcs = ["internal/hash_generator_testing.cc"],
hdrs = ["internal/hash_generator_testing.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_policy_testing",
"//absl/meta:type_traits",
"//absl/strings",
],
)
cc_library(
name = "hash_policy_testing",
testonly = 1,
hdrs = ["internal/hash_policy_testing.h"],
copts = ABSL_TEST_COPTS,
deps = [
"//absl/hash",
"//absl/strings",
],
)
cc_test(
name = "hash_policy_testing_test",
srcs = ["internal/hash_policy_testing_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_policy_testing",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_policy_traits",
hdrs = ["internal/hash_policy_traits.h"],
copts = ABSL_DEFAULT_COPTS,
deps = ["//absl/meta:type_traits"],
)
cc_test(
name = "hash_policy_traits_test",
srcs = ["internal/hash_policy_traits_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_policy_traits",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hashtable_debug",
hdrs = ["internal/hashtable_debug.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":hashtable_debug_hooks",
],
)
cc_library(
name = "hashtable_debug_hooks",
hdrs = ["internal/hashtable_debug_hooks.h"],
copts = ABSL_DEFAULT_COPTS,
)
cc_library(
name = "node_hash_policy",
hdrs = ["internal/node_hash_policy.h"],
copts = ABSL_DEFAULT_COPTS,
)
cc_test(
name = "node_hash_policy_test",
srcs = ["internal/node_hash_policy_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_policy_traits",
":node_hash_policy",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "raw_hash_map",
hdrs = ["internal/raw_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":container_memory",
":raw_hash_set",
],
)
cc_library(
name = "raw_hash_set",
srcs = ["internal/raw_hash_set.cc"],
hdrs = ["internal/raw_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":compressed_tuple",
":container_memory",
":hash_policy_traits",
":hashtable_debug_hooks",
":layout",
"//absl/base:bits",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/types:optional",
"//absl/utility",
],
)
cc_test(
name = "raw_hash_set_test",
srcs = ["internal/raw_hash_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkstatic = 1,
tags = NOTEST_TAGS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_policy_testing",
":hashtable_debug",
":raw_hash_set",
"//absl/base",
"//absl/base:core_headers",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "raw_hash_set_allocator_test",
size = "small",
srcs = ["internal/raw_hash_set_allocator_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":raw_hash_set",
":tracked",
"//absl/base:core_headers",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "layout",
hdrs = ["internal/layout.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/types:span",
"//absl/utility",
],
)
cc_test(
name = "layout_test",
size = "small",
srcs = ["internal/layout_test.cc"],
copts = ABSL_TEST_COPTS,
tags = NOTEST_TAGS,
visibility = ["//visibility:private"],
deps = [
":layout",
"//absl/base",
"//absl/base:core_headers",
"//absl/types:span",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "tracked",
testonly = 1,
hdrs = ["internal/tracked.h"],
copts = ABSL_TEST_COPTS,
)
cc_library(
name = "unordered_map_constructor_test",
testonly = 1,
hdrs = ["internal/unordered_map_constructor_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_map_lookup_test",
testonly = 1,
hdrs = ["internal/unordered_map_lookup_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_map_modifiers_test",
testonly = 1,
hdrs = ["internal/unordered_map_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_constructor_test",
testonly = 1,
hdrs = ["internal/unordered_set_constructor_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_lookup_test",
testonly = 1,
hdrs = ["internal/unordered_set_lookup_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_modifiers_test",
testonly = 1,
hdrs = ["internal/unordered_set_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "unordered_set_test",
srcs = ["internal/unordered_set_test.cc"],
copts = ABSL_TEST_COPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "unordered_map_test",
srcs = ["internal/unordered_map_test.cc"],
copts = ABSL_TEST_COPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
@@ -17,12 +17,34 @@
list(APPEND CONTAINER_PUBLIC_HEADERS
"fixed_array.h"
"flat_hash_map.h"
"flat_hash_set.h"
"inlined_vector.h"
"node_hash_map.h"
"node_hash_set.h"
)
list(APPEND CONTAINER_INTERNAL_HEADERS
"internal/compressed_tuple.h"
"internal/container_memory.h"
"internal/hash_function_defaults.h"
"internal/hash_generator_testing.h"
"internal/hash_policy_testing.h"
"internal/hash_policy_traits.h"
"internal/hashtable_debug.h"
"internal/layout.h"
"internal/node_hash_policy.h"
"internal/raw_hash_map.h"
"internal/raw_hash_set.h"
"internal/test_instance_tracker.h"
"internal/tracked.h"
"internal/unordered_map_constructor_test.h"
"internal/unordered_map_lookup_test.h"
"internal/unordered_map_modifiers_test.h"
"internal/unordered_set_constructor_test.h"
"internal/unordered_set_lookup_test.h"
"internal/unordered_set_modifiers_test.h"
)
...
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: flat_hash_map.h
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_map<K, V>` is an unordered associative container of
// unique keys and associated values designed to be a more efficient replacement
// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
// deletion of map elements can be done as an `O(1)` operation. However,
// `flat_hash_map` (and other unordered associative containers known as the
// collection of Abseil "Swiss tables") contain other optimizations that result
// in both memory and computation advantages.
//
// In most cases, your default choice for a hash map should be a map of type
// `flat_hash_map`.
#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
#include <cstddef>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::flat_hash_map
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_map<K, V>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
// the following notable differences:
//
// * Requires keys that are CopyConstructible
// * Requires values that are MoveConstructible
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
// `insert()`, provided that the map is given a compatible heterogeneous
// hashing function and equality operator.
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash map.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `flat_hash_map`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
// stability. If you require pointer stability, or your values are large,
// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
// If your types are not moveable or you require pointer stability for keys,
// consider `absl::node_hash_map`.
//
// Example:
//
// // Create a flat hash map of three strings (that map to strings)
// absl::flat_hash_map<std::string, std::string> ducks =
// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
//
// // Insert a new element into the flat hash map
// ducks.insert({"d", "donald"});
//
// // Force a rehash of the flat hash map
// ducks.rehash(0);
//
// // Find the element with the key "b"
// std::string search_key = "b";
// auto result = ducks.find(search_key);
// if (result != ducks.end()) {
// std::cout << "Result: " << result->second << std::endl;
// }
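//
// A minimal sketch of heterogeneous lookup (this assumes the default
// transparent string hasher and equality used for `std::string` keys, and
// reuses the `ducks` map from the example above):
//
// absl::string_view view_key = "b"; // no std::string is constructed
// auto view_result = ducks.find(view_key);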
template <class K, class V,
class Hash = absl::container_internal::hash_default_hash<K>,
class Eq = absl::container_internal::hash_default_eq<K>,
class Allocator = std::allocator<std::pair<const K, V>>>
class flat_hash_map : public absl::container_internal::raw_hash_map<
absl::container_internal::FlatHashMapPolicy<K, V>,
Hash, Eq, Allocator> {
using Base = typename flat_hash_map::raw_hash_map;
public:
flat_hash_map() {}
using Base::Base;
// flat_hash_map::begin()
//
// Returns an iterator to the beginning of the `flat_hash_map`.
using Base::begin;
// flat_hash_map::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_map`.
using Base::cbegin;
// flat_hash_map::cend()
//
// Returns a const iterator to the end of the `flat_hash_map`.
using Base::cend;
// flat_hash_map::end()
//
// Returns an iterator to the end of the `flat_hash_map`.
using Base::end;
// flat_hash_map::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_map`.
//
// NOTE: this member function is particular to `absl::flat_hash_map` and is
// not provided in the `std::unordered_map` API.
using Base::capacity;
// flat_hash_map::empty()
//
// Returns whether or not the `flat_hash_map` is empty.
using Base::empty;
// flat_hash_map::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `flat_hash_map` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `flat_hash_map<K, V>`.
using Base::max_size;
// flat_hash_map::size()
//
// Returns the number of elements currently within the `flat_hash_map`.
using Base::size;
// flat_hash_map::clear()
//
// Removes all elements from the `flat_hash_map`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer, call `erase(begin(), end())`.
using Base::clear;
// flat_hash_map::erase()
//
// Erases elements within the `flat_hash_map`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_map`, returning
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
// general and `std::unordered_map` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
using Base::erase;
// flat_hash_map::insert()
//
// Inserts an element of the specified value into the `flat_hash_map`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const init_type& value):
//
// Inserts a value into the `flat_hash_map`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
// std::pair<iterator,bool> insert(init_type&& value ):
//
// Inserts a moveable value into the `flat_hash_map`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const init_type& value):
// iterator insert(const_iterator hint, T&& value):
// iterator insert(const_iterator hint, init_type&& value );
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last ):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_map` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<init_type> ilist ):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_map` we guarantee the first match is inserted.
using Base::insert;
// flat_hash_map::insert_or_assign()
//
// Inserts an element of the specified value into the `flat_hash_map` provided
// that a value with the given key does not already exist, or replaces it with
// the element value if a key for that value already exists, returning an
// iterator pointing to the newly inserted element. If rehashing occurs due
// to the insertion, all existing iterators are invalidated. Overloads are
// listed below.
//
// pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
// pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `flat_hash_map`.
//
// iterator insert_or_assign(const_iterator hint,
// const init_type& k, T&& obj):
// iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
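//
// A minimal usage sketch (assuming a hypothetical
// `flat_hash_map<std::string, std::string>` named `m`):
//
// m.insert_or_assign("a", "huey"); // key absent: inserts {"a", "huey"}
// m.insert_or_assign("a", "dewey"); // key present: assigns "dewey" over it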
using Base::insert_or_assign;
// flat_hash_map::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// flat_hash_map::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// flat_hash_map::try_emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, provided that no element with the given key
// already exists. Unlike `emplace()`, if an element with the given key
// already exists, we guarantee that no element is constructed.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
// Overloads are listed below.
//
// pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
// pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map`.
//
// iterator try_emplace(const_iterator hint,
// const init_type& k, Args&&... args):
// iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
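//
// A minimal sketch of the no-construction guarantee (assuming a hypothetical
// `flat_hash_map<std::string, std::unique_ptr<int>>` named `m`):
//
// m.try_emplace("k", absl::make_unique<int>(1)); // inserts
// auto v = absl::make_unique<int>(2);
// m.try_emplace("k", std::move(v)); // "k" exists: no element is
// // constructed and `v` is not moved from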
using Base::try_emplace;
// flat_hash_map::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the key,value pair of the element at the indicated position and
// returns a node handle owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the key,value pair of the element with a key matching the passed
// key value and returns a node handle owning that extracted data. If the
// `flat_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
using Base::extract;
// flat_hash_map::merge()
//
// Extracts elements from a given `source` flat hash map into this
// `flat_hash_map`. If the destination `flat_hash_map` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
// flat_hash_map::swap(flat_hash_map& other)
//
// Exchanges the contents of this `flat_hash_map` with those of the `other`
// flat hash map, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_map` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash map's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the map's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// flat_hash_map::rehash(count)
//
// Rehashes the `flat_hash_map`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_map`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;
// flat_hash_map::reserve(count)
//
// Sets the number of slots in the `flat_hash_map` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;
// flat_hash_map::at()
//
// Returns a reference to the mapped value of the element with key equivalent
// to the passed key.
using Base::at;
// flat_hash_map::contains()
//
// Determines whether an element with a key comparing equal to the given `key`
// exists within the `flat_hash_map`, returning `true` if so or `false`
// otherwise.
using Base::contains;
// flat_hash_map::count(const Key& key) const
//
// Returns the number of elements with a key comparing equal to the given
// `key` within the `flat_hash_map`. Note that this function will return
// either `1` or `0` since duplicate keys are not allowed within a
// `flat_hash_map`.
using Base::count;
// flat_hash_map::equal_range()
//
// Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_map`.
using Base::equal_range;
// flat_hash_map::find()
//
// Finds an element with the passed `key` within the `flat_hash_map`.
using Base::find;
// flat_hash_map::operator[]()
//
// Returns a reference to the value mapped to the passed key within the
// `flat_hash_map`, performing an `insert()` if the key does not already
// exist.
//
// If an insertion occurs and results in a rehashing of the container, all
// iterators are invalidated. Otherwise iterators are not affected and
// references are not invalidated. Overloads are listed below.
//
// T& operator[](const Key& key ):
//
// Inserts an init_type object constructed in-place if the element with the
// given key does not exist.
//
// T& operator[]( Key&& key ):
//
// Inserts an init_type object constructed in-place provided that an element
// with the given key does not exist.
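//
// A minimal sketch (assuming a hypothetical `flat_hash_map<std::string, int>`
// named `m`):
//
// m["a"] += 1; // value-initializes the mapped int to 0, then increments it
// m["a"] += 1; // key present: no insertion, only the increment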
using Base::operator[];
// flat_hash_map::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_map`. Note that
// because a flat hash map contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_map`.
using Base::bucket_count;
// flat_hash_map::load_factor()
//
// Returns the current load factor of the `flat_hash_map` (the average number
// of slots occupied with a value within the hash map).
using Base::load_factor;
// flat_hash_map::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_map`. Overloads are
// listed below.
//
// float flat_hash_map::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_map`.
//
// void flat_hash_map::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_map` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_map` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// flat_hash_map::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_map`.
using Base::get_allocator;
// flat_hash_map::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_map`.
using Base::hash_function;
// flat_hash_map::key_eq()
//
// Returns the function used for comparing key equality.
using Base::key_eq;
};
namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy {
using slot_type = container_internal::slot_type<K, V>;
using key_type = K;
using mapped_type = V;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
slot_type::construct(alloc, slot, std::forward<Args>(args)...);
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
slot_type::destroy(alloc, slot);
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
slot_type::transfer(alloc, new_slot, old_slot);
}
template <class F, class... Args>
static decltype(absl::container_internal::DecomposePair(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposePair(std::forward<F>(f),
std::forward<Args>(args)...);
}
static size_t space_used(const slot_type*) { return 0; }
static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
static V& value(std::pair<const K, V>* kv) { return kv->second; }
static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/flat_hash_map.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
#include "absl/types/any.h"
namespace absl {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::_;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
template <class K, class V>
using Map =
flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual, Alloc<>>;
static_assert(!std::is_standard_layout<NonStandardLayout>(), "");
using MapTypes =
::testing::Types<Map<int, int>, Map<std::string, int>, Map<Enum, std::string>,
Map<EnumClass, int>, Map<int, NonStandardLayout>,
Map<NonStandardLayout, int>>;
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ConstructorTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, LookupTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ModifiersTest, MapTypes);
TEST(FlatHashMap, StandardLayout) {
struct Int {
explicit Int(size_t value) : value(value) {}
Int() : value(0) { ADD_FAILURE(); }
Int(const Int& other) : value(other.value) { ADD_FAILURE(); }
Int(Int&&) = default;
bool operator==(const Int& other) const { return value == other.value; }
size_t value;
};
static_assert(std::is_standard_layout<Int>(), "");
struct Hash {
size_t operator()(const Int& obj) const { return obj.value; }
};
// Verify that neither the key nor the value get default-constructed or
// copy-constructed.
{
flat_hash_map<Int, Int, Hash> m;
m.try_emplace(Int(1), Int(2));
m.try_emplace(Int(3), Int(4));
m.erase(Int(1));
m.rehash(2 * m.bucket_count());
}
{
flat_hash_map<Int, Int, Hash> m;
m.try_emplace(Int(1), Int(2));
m.try_emplace(Int(3), Int(4));
m.erase(Int(1));
m.clear();
}
}
// gcc becomes unhappy if this is inside the method, so pull it out here.
struct balast {};
TEST(FlatHashMap, IteratesMsan) {
// Because SwissTable randomizes on pointer addresses, we keep old tables
// around to ensure we don't reuse old memory.
std::vector<absl::flat_hash_map<int, balast>> garbage;
for (int i = 0; i < 100; ++i) {
absl::flat_hash_map<int, balast> t;
for (int j = 0; j < 100; ++j) {
t[j];
for (const auto& p : t) EXPECT_THAT(p, Pair(_, _));
}
garbage.push_back(std::move(t));
}
}
// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to
// avoid creating expensive key elements when the item is already present in
// the map.
struct LazyInt {
explicit LazyInt(size_t value, int* tracker)
: value(value), tracker(tracker) {}
explicit operator size_t() const {
++*tracker;
return value;
}
size_t value;
int* tracker;
};
struct Hash {
using is_transparent = void;
int* tracker;
size_t operator()(size_t obj) const {
++*tracker;
return obj;
}
size_t operator()(const LazyInt& obj) const {
++*tracker;
return obj.value;
}
};
struct Eq {
using is_transparent = void;
bool operator()(size_t lhs, size_t rhs) const {
return lhs == rhs;
}
bool operator()(size_t lhs, const LazyInt& rhs) const {
return lhs == rhs.value;
}
};
TEST(FlatHashMap, LazyKeyPattern) {
// Exact hash counts are only guaranteed in opt mode; in debug mode, internal
// assertions can cause extra calls to hash.
int conversions = 0;
int hashes = 0;
flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes});
m[LazyInt(1, &conversions)] = 1;
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1)));
EXPECT_EQ(conversions, 1);
#ifdef NDEBUG
EXPECT_EQ(hashes, 1);
#endif
m[LazyInt(1, &conversions)] = 2;
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2)));
EXPECT_EQ(conversions, 1);
#ifdef NDEBUG
EXPECT_EQ(hashes, 2);
#endif
m.try_emplace(LazyInt(2, &conversions), 3);
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
EXPECT_EQ(conversions, 2);
#ifdef NDEBUG
EXPECT_EQ(hashes, 3);
#endif
m.try_emplace(LazyInt(2, &conversions), 4);
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
EXPECT_EQ(conversions, 2);
#ifdef NDEBUG
EXPECT_EQ(hashes, 4);
#endif
}
TEST(FlatHashMap, BitfieldArgument) {
union {
int n : 1;
};
n = 0;
flat_hash_map<int, int> m;
m.erase(n);
m.count(n);
m.prefetch(n);
m.find(n);
m.contains(n);
m.equal_range(n);
m.insert_or_assign(n, n);
m.insert_or_assign(m.end(), n, n);
m.try_emplace(n);
m.try_emplace(m.end(), n);
m.at(n);
m[n];
}
TEST(FlatHashMap, MergeExtractInsert) {
// We can't test mutable keys, or non-copyable keys with flat_hash_map.
// Test that the nodes have the proper API.
absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}};
auto node = m.extract(1);
EXPECT_TRUE(node);
EXPECT_EQ(node.key(), 1);
EXPECT_EQ(node.mapped(), 7);
EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9)));
node.mapped() = 17;
m.insert(std::move(node));
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9)));
}
#if !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__)
TEST(FlatHashMap, Any) {
absl::flat_hash_map<int, absl::any> m;
m.emplace(1, 7);
auto it = m.find(1);
ASSERT_NE(it, m.end());
EXPECT_EQ(7, absl::any_cast<int>(it->second));
m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8));
it = m.find(2);
ASSERT_NE(it, m.end());
EXPECT_EQ(8, absl::any_cast<int>(it->second));
m.emplace(std::piecewise_construct, std::make_tuple(3),
std::make_tuple(absl::any(9)));
it = m.find(3);
ASSERT_NE(it, m.end());
EXPECT_EQ(9, absl::any_cast<int>(it->second));
struct H {
size_t operator()(const absl::any&) const { return 0; }
};
struct E {
bool operator()(const absl::any&, const absl::any&) const { return true; }
};
absl::flat_hash_map<absl::any, int, H, E> m2;
m2.emplace(1, 7);
auto it2 = m2.find(1);
ASSERT_NE(it2, m2.end());
EXPECT_EQ(7, it2->second);
}
#endif  // !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__)
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: flat_hash_set.h
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container designed to
// be a more efficient replacement for `std::unordered_set`. Like
// `unordered_set`, search, insertion, and deletion of set elements can be done
// as an `O(1)` operation. However, `flat_hash_set` (and other unordered
// associative containers known as the collection of Abseil "Swiss tables")
// contain other optimizations that result in both memory and computation
// advantages.
//
// In most cases, your default choice for a hash set should be a set of type
// `flat_hash_set`.
#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
#define ABSL_CONTAINER_FLAT_HASH_SET_H_
#include <type_traits>
#include <utility>
#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
namespace container_internal {
template <typename T>
struct FlatHashSetPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::flat_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container which has
// been optimized for both speed and memory footprint in most common use cases.
// Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Requires keys that are CopyConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is given a compatible heterogeneous hashing function and
// equality operator.
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
// require pointer stability, consider using
// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
// you require pointer stability, consider `absl::node_hash_set` instead.
//
// Example:
//
// // Create a flat hash set of three strings
// absl::flat_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the flat hash set
// ducks.insert("donald");
//
// // Force a rehash of the flat hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Allocator = std::allocator<T>>
class flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;
public:
flat_hash_set() {}
using Base::Base;
// flat_hash_set::begin()
//
// Returns an iterator to the beginning of the `flat_hash_set`.
using Base::begin;
// flat_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_set`.
using Base::cbegin;
// flat_hash_set::cend()
//
// Returns a const iterator to the end of the `flat_hash_set`.
using Base::cend;
// flat_hash_set::end()
//
// Returns an iterator to the end of the `flat_hash_set`.
using Base::end;
// flat_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;
// flat_hash_set::empty()
//
// Returns whether or not the `flat_hash_set` is empty.
using Base::empty;
// flat_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `flat_hash_set` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `flat_hash_set<T>`.
using Base::max_size;
// flat_hash_set::size()
//
// Returns the number of elements currently within the `flat_hash_set`.
using Base::size;
// flat_hash_set::clear()
//
// Removes all elements from the `flat_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer, call `erase(begin(), end())`.
using Base::clear;
// flat_hash_set::erase()
//
// Erases elements within the `flat_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_set`, returning
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
// general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
using Base::erase;
// flat_hash_set::insert()
//
// Inserts an element of the specified value into the `flat_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `flat_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last ):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist ):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_set` we guarantee the first match is inserted.
using Base::insert;
// flat_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// flat_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// flat_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `flat_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;
// flat_hash_set::merge()
//
// Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
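//
// A minimal sketch:
//
// absl::flat_hash_set<int> dst = {1, 2};
// absl::flat_hash_set<int> src = {2, 3};
// dst.merge(src); // dst == {1, 2, 3}; src keeps the duplicate 2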
using Base::merge;
// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
// flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// flat_hash_set::rehash(count)
//
// Rehashes the `flat_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;
// flat_hash_set::reserve(count)
//
// Sets the number of slots in the `flat_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;
// flat_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `flat_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;
// flat_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `flat_hash_set`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `flat_hash_set`.
using Base::count;
// flat_hash_set::equal_range()
//
// Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_set`.
using Base::equal_range;
// flat_hash_set::find()
//
// Finds an element with the passed `key` within the `flat_hash_set`.
using Base::find;
// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
// because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;
// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;
// flat_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_set`. Overloads are
// listed below.
//
// float flat_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_set`.
//
// void flat_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// flat_hash_set::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_set`.
using Base::get_allocator;
// flat_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_set`.
using Base::hash_function;
// flat_hash_set::key_eq()
//
// Returns the function used for comparing key equality.
using Base::key_eq;
};
namespace container_internal {
template <class T>
struct FlatHashSetPolicy {
using slot_type = T;
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
absl::allocator_traits<Allocator>::construct(*alloc, slot,
std::forward<Args>(args)...);
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}
static T& element(slot_type* slot) { return *slot; }
template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}
static size_t space_used(const T*) { return 0; }
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/flat_hash_set.h"
#include <vector>
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
template <class T>
using Set =
absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
using SetTypes =
::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>;
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ConstructorTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, LookupTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ModifiersTest, SetTypes);
TEST(FlatHashSet, EmplaceString) {
std::vector<std::string> v = {"a", "b"};
absl::flat_hash_set<absl::string_view> hs(v.begin(), v.end());
EXPECT_THAT(hs, UnorderedElementsAreArray(v));
}
TEST(FlatHashSet, BitfieldArgument) {
union {
int n : 1;
};
n = 0;
absl::flat_hash_set<int> s = {n};
s.insert(n);
s.insert(s.end(), n);
s.insert({n});
s.erase(n);
s.count(n);
s.prefetch(n);
s.find(n);
s.contains(n);
s.equal_range(n);
}
TEST(FlatHashSet, MergeExtractInsert) {
struct Hash {
size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
};
struct Eq {
bool operator()(const std::unique_ptr<int>& a,
const std::unique_ptr<int>& b) const {
return *a == *b;
}
};
absl::flat_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
set1.insert(absl::make_unique<int>(7));
set1.insert(absl::make_unique<int>(17));
set2.insert(absl::make_unique<int>(7));
set2.insert(absl::make_unique<int>(19));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
set1.merge(set2);
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
auto node = set1.extract(absl::make_unique<int>(7));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(7));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
auto insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_FALSE(insert_result.inserted);
EXPECT_TRUE(insert_result.node);
EXPECT_THAT(insert_result.node.value(), Pointee(7));
EXPECT_EQ(**insert_result.position, 7);
EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
node = set1.extract(absl::make_unique<int>(17));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(17));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
node.value() = absl::make_unique<int>(23);
insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_TRUE(insert_result.inserted);
EXPECT_FALSE(insert_result.node);
EXPECT_EQ(**insert_result.position, 23);
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/utility/utility.h"
namespace absl {
namespace container_internal {
// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2; n must be positive.
//
// Note that many allocators don't honor alignment requirements above a certain
// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
// Allocate() doesn't apply alignment corrections. If the underlying allocator
// returns an insufficiently aligned pointer, that's what you are going to get.
template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
struct alignas(Alignment) M {};
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc);
void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
}
// The pointer must have been previously obtained by calling
// Allocate<Alignment>(alloc, n).
template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
struct alignas(Alignment) M {};
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A mem_alloc(*alloc);
AT::deallocate(mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
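// Illustrative usage sketch (not part of the API surface): obtaining and
// releasing 16-byte-aligned storage through a std::allocator. The size passed
// to Deallocate() must match the size passed to Allocate().
//
//   std::allocator<char> alloc;
//   void* p = Allocate<16>(&alloc, 100);  // >= 100 bytes, 16-byte aligned
//   // ... use the storage ...
//   Deallocate<16>(&alloc, p, 100);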
namespace memory_internal {
// Constructs T into uninitialized storage pointed to by `ptr` using the args
// specified in the tuple.
template <class Alloc, class T, class Tuple, size_t... I>
void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
absl::index_sequence<I...>) {
absl::allocator_traits<Alloc>::construct(
*alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
}
template <class T, class F>
struct WithConstructedImplF {
template <class... Args>
decltype(std::declval<F>()(std::declval<T>())) operator()(
Args&&... args) const {
return std::forward<F>(f)(T(std::forward<Args>(args)...));
}
F&& f;
};
template <class T, class Tuple, size_t... Is, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
Tuple&& t, absl::index_sequence<Is...>, F&& f) {
return WithConstructedImplF<T, F>{std::forward<F>(f)}(
std::get<Is>(std::forward<Tuple>(t))...);
}
template <class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
-> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}
// Returns a tuple of references to the elements of the input tuple. T must be a
// tuple.
template <class T>
auto TupleRef(T&& t) -> decltype(
TupleRefImpl(std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>())) {
return TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>());
}
template <class F, class K, class V>
decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
std::declval<std::tuple<K>>(), std::declval<V>()))
DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
const auto& key = std::get<0>(p.first);
return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
std::move(p.second));
}
} // namespace memory_internal
// Constructs T into uninitialized storage pointed to by `ptr` using the args
// specified in the tuple.
template <class Alloc, class T, class Tuple>
void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
memory_internal::ConstructFromTupleImpl(
alloc, ptr, std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>());
}
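// Illustrative usage sketch, mirroring the tests for this helper: constructs a
// std::string into uninitialized storage from a tuple of constructor
// arguments. `storage` is a hypothetical pointer to raw space for one string.
//
//   std::allocator<std::string> a;
//   std::string* storage = std::allocator_traits<decltype(a)>::allocate(a, 1);
//   ConstructFromTuple(&a, storage, std::forward_as_tuple(5, 'a'));
//   // *storage == "aaaaa"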
// Constructs T using the args specified in the tuple and calls F with the
// constructed value.
template <class T, class Tuple, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
Tuple&& t, F&& f) {
return memory_internal::WithConstructedImpl<T>(
std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>(),
std::forward<F>(f));
}
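// Illustrative usage sketch: constructs an absl::string_view from the tuple's
// contents and passes it to the functor, returning the functor's result.
//
//   size_t n = WithConstructed<absl::string_view>(
//       std::make_tuple(std::string("a")),
//       [](absl::string_view v) { return v.size(); });  // n == 1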
// Given arguments of a std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
// The following two snippets are equivalent.
//
// 1. std::pair<F, S> p(args...);
//
// 2. auto a = PairArgs(args...);
//    std::pair<F, S> p(std::piecewise_construct,
//                      std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
std::forward_as_tuple(std::forward<S>(s))};
}
template <class F, class S>
std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
const std::pair<F, S>& p) {
return PairArgs(p.first, p.second);
}
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
}
template <class F, class S>
auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
-> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)))) {
return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)));
}
// A helper function for implementing apply() in map policies.
template <class F, class... Args>
auto DecomposePair(F&& f, Args&&... args)
-> decltype(memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
return memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
}
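// Illustrative sketch of the callback contract: the functor receives the key
// followed by piecewise constructor arguments for the two halves of the pair.
//
//   auto f = [](const int& key, std::piecewise_construct_t,
//               std::tuple<int&&> ks, std::tuple<double>&& vs) { /* ... */ };
//   DecomposePair(f, 42, 0.5);  // key == 42; ks/vs hold the ctor arguments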
// A helper function for implementing apply() in set policies.
template <class F, class Arg>
decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
DecomposeValue(F&& f, Arg&& arg) {
const auto& key = arg;
return std::forward<F>(f)(key, std::forward<Arg>(arg));
}
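// Illustrative sketch of the callback contract: the functor receives the key
// as a const reference and the same object again, forwarded; the two
// parameters alias one another.
//
//   auto f = [](const int& key, int&& value) { assert(&key == &value); };
//   DecomposeValue(f, 42);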
// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
#ifdef MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
(void)s;
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
#ifdef MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
(void)s;
}
template <typename T>
inline void SanitizerPoisonObject(const T* object) {
SanitizerPoisonMemoryRegion(object, sizeof(T));
}
template <typename T>
inline void SanitizerUnpoisonObject(const T* object) {
SanitizerUnpoisonMemoryRegion(object, sizeof(T));
}
namespace memory_internal {
// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
// offsetof(Pair, second) respectively. Otherwise they are -1.
//
// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout
// type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
static constexpr size_t kFirst = -1;
static constexpr size_t kSecond = -1;
};
template <class Pair>
struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
static constexpr size_t kFirst = offsetof(Pair, first);
static constexpr size_t kSecond = offsetof(Pair, second);
};
template <class K, class V>
struct IsLayoutCompatible {
private:
struct Pair {
K first;
V second;
};
// Is P layout-compatible with Pair?
template <class P>
static constexpr bool LayoutCompatible() {
return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
alignof(P) == alignof(Pair) &&
memory_internal::OffsetOf<P>::kFirst ==
memory_internal::OffsetOf<Pair>::kFirst &&
memory_internal::OffsetOf<P>::kSecond ==
memory_internal::OffsetOf<Pair>::kSecond;
}
public:
// Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
// then it is safe to store them in a union and read from either.
static constexpr bool value = std::is_standard_layout<K>() &&
std::is_standard_layout<Pair>() &&
memory_internal::OffsetOf<Pair>::kFirst == 0 &&
LayoutCompatible<std::pair<K, V>>() &&
LayoutCompatible<std::pair<const K, V>>();
};
} // namespace memory_internal
// If kMutableKeys is false, only the value member is accessed.
//
// If kMutableKeys is true, key is accessed through all slots while value and
// mutable_value are accessed only via INITIALIZED slots. Slots are created and
// destroyed via mutable_value so that the key can be moved later.
template <class K, class V>
union slot_type {
private:
static void emplace(slot_type* slot) {
    // The construction of the union doesn't do anything at runtime, but it
    // allows us to access its members without violating aliasing rules.
new (slot) slot_type;
}
// If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
// or the other via slot_type. We are also free to access the key via
// slot_type::key in this case.
using kMutableKeys =
std::integral_constant<bool,
memory_internal::IsLayoutCompatible<K, V>::value>;
public:
slot_type() {}
~slot_type() = delete;
using value_type = std::pair<const K, V>;
using mutable_value_type = std::pair<K, V>;
value_type value;
mutable_value_type mutable_value;
K key;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
std::forward<Args>(args)...);
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::forward<Args>(args)...);
}
}
// Construct this slot by moving from another slot.
template <class Allocator>
static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &slot->mutable_value, std::move(other->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::move(other->value));
}
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
} else {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
}
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
emplace(new_slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
std::move(old_slot->value));
}
destroy(alloc, old_slot);
}
template <class Allocator>
static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
if (kMutableKeys::value) {
using std::swap;
swap(a->mutable_value, b->mutable_value);
} else {
value_type tmp = std::move(a->value);
absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
std::move(b->value));
absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
std::move(tmp));
}
}
template <class Allocator>
static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
if (kMutableKeys::value) {
dest->mutable_value = std::move(src->mutable_value);
} else {
absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
std::move(src->value));
}
}
template <class Allocator>
static void move(Allocator* alloc, slot_type* first, slot_type* last,
slot_type* result) {
for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
move(alloc, src, dest);
}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/container_memory.h"
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
namespace {
using ::testing::Pair;
TEST(Memory, AlignmentLargerThanBase) {
std::allocator<int8_t> alloc;
void* mem = Allocate<2>(&alloc, 3);
EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
memcpy(mem, "abc", 3);
Deallocate<2>(&alloc, mem, 3);
}
TEST(Memory, AlignmentSmallerThanBase) {
std::allocator<int64_t> alloc;
void* mem = Allocate<2>(&alloc, 3);
EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
memcpy(mem, "abc", 3);
Deallocate<2>(&alloc, mem, 3);
}
class Fixture : public ::testing::Test {
using Alloc = std::allocator<std::string>;
public:
Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); }
~Fixture() override {
std::allocator_traits<Alloc>::destroy(*alloc(), ptr_);
std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1);
}
std::string* ptr() { return ptr_; }
Alloc* alloc() { return &alloc_; }
private:
Alloc alloc_;
std::string* ptr_;
};
TEST_F(Fixture, ConstructNoArgs) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple());
EXPECT_EQ(*ptr(), "");
}
TEST_F(Fixture, ConstructOneArg) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde"));
EXPECT_EQ(*ptr(), "abcde");
}
TEST_F(Fixture, ConstructTwoArg) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a'));
EXPECT_EQ(*ptr(), "aaaaa");
}
TEST(PairArgs, NoArgs) {
EXPECT_THAT(PairArgs(),
Pair(std::forward_as_tuple(), std::forward_as_tuple()));
}
TEST(PairArgs, TwoArgs) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(1, 'A'));
}
TEST(PairArgs, Pair) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(std::make_pair(1, 'A')));
}
TEST(PairArgs, Piecewise) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(std::piecewise_construct, std::forward_as_tuple(1),
std::forward_as_tuple('A')));
}
TEST(WithConstructed, Simple) {
EXPECT_EQ(1, WithConstructed<absl::string_view>(
std::make_tuple(std::string("a")),
[](absl::string_view str) { return str.size(); }));
}
template <class F, class Arg>
decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>()))
DecomposeValueImpl(int, F&& f, Arg&& arg) {
return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg));
}
template <class F, class Arg>
const char* DecomposeValueImpl(char, F&& f, Arg&& arg) {
return "not decomposable";
}
template <class F, class Arg>
decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>()))
TryDecomposeValue(F&& f, Arg&& arg) {
return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg));
}
TEST(DecomposeValue, Decomposable) {
auto f = [](const int& x, int&& y) {
EXPECT_EQ(&x, &y);
EXPECT_EQ(42, x);
return 'A';
};
EXPECT_EQ('A', TryDecomposeValue(f, 42));
}
TEST(DecomposeValue, NotDecomposable) {
auto f = [](void*) {
ADD_FAILURE() << "Must not be called";
return 'A';
};
EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42));
}
template <class F, class... Args>
decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...))
DecomposePairImpl(int, F&& f, Args&&... args) {
return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
}
template <class F, class... Args>
const char* DecomposePairImpl(char, F&& f, Args&&... args) {
return "not decomposable";
}
template <class F, class... Args>
decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...))
TryDecomposePair(F&& f, Args&&... args) {
return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...);
}
TEST(DecomposePair, Decomposable) {
auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
std::tuple<double>&& v) {
EXPECT_EQ(&x, &std::get<0>(k));
EXPECT_EQ(42, x);
EXPECT_EQ(0.5, std::get<0>(v));
return 'A';
};
EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5));
EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5)));
EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct,
std::make_tuple(42), std::make_tuple(0.5)));
}
TEST(DecomposePair, NotDecomposable) {
auto f = [](...) {
ADD_FAILURE() << "Must not be called";
return 'A';
};
EXPECT_STREQ("not decomposable",
TryDecomposePair(f));
EXPECT_STREQ("not decomposable",
TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
std::make_tuple(0.5)));
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Define the default Hash and Eq functions for SwissTable containers.
//
// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
// functions for SwissTable containers. There are two reasons for this.
//
// SwissTable containers are power-of-2-sized containers:
//
// This means they use the lower bits of the hash value to find the slot for
// each entry. The typical hash function for integral types is the identity,
// which is a very weak hash function for SwissTable (or any power-of-2-sized
// hashtable implementation) and leads to excessive collisions. For SwissTable
// we use murmur3-style mixing to reduce collisions to a minimum.
//
// SwissTable containers support heterogeneous lookup:
//
// In order to make heterogeneous lookup work, hash and equal functions must be
// polymorphic. At the same time they have to satisfy the same requirements the
// C++ standard imposes on hash functions and equality operators. That is:
//
// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
//
// For SwissTable containers this requirement is relaxed to allow a and b of
// any and possibly different types. Note that like the standard the hash and
// equal functions are still bound to T. This is important because some type U
// can be hashed by/tested for equality differently depending on T. A notable
// example is `const char*`. `const char*` is treated as a c-style string when
// the hash function is hash<string> but as a pointer when the hash function is
// hash<void*>.
//
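// Illustrative sketch (relying on the specializations below): with the default
// functors, a container keyed on std::string can be probed with an
// absl::string_view and no temporary std::string is materialized.
//
//   absl::flat_hash_set<std::string> s = {"abc"};
//   s.count(absl::string_view("abc"));  // heterogeneous lookup, no temporary
//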
#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#include <stdint.h>
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
// The hash of an object of type T is computed by using absl::Hash.
template <class T, class E = void>
struct HashEq {
using Hash = absl::Hash<T>;
using Eq = std::equal_to<T>;
};
struct StringHash {
using is_transparent = void;
size_t operator()(absl::string_view v) const {
return absl::Hash<absl::string_view>{}(v);
}
};
// Supports heterogeneous lookup for string-like elements.
struct StringHashEq {
using Hash = StringHash;
struct Eq {
using is_transparent = void;
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs;
}
};
};
#if defined(HAS_GLOBAL_STRING)
template <>
struct HashEq<::string> : StringHashEq {};
#endif
template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
struct HashEq<T*> {
struct Hash {
using is_transparent = void;
template <class U>
size_t operator()(const U& ptr) const {
return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
}
};
struct Eq {
using is_transparent = void;
template <class A, class B>
bool operator()(const A& a, const B& b) const {
return HashEq::ToPtr(a) == HashEq::ToPtr(b);
}
};
private:
static const T* ToPtr(const T* ptr) { return ptr; }
template <class U, class D>
static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
return ptr.get();
}
template <class U>
static const T* ToPtr(const std::shared_ptr<U>& ptr) {
return ptr.get();
}
};
template <class T, class D>
struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
template <class T>
struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
// This header's visibility is restricted. If you need to access the default
// hasher, please use the container's ::hasher alias instead.
//
// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
template <class T>
using hash_default_hash = typename container_internal::HashEq<T>::Hash;
// This header's visibility is restricted. If you need to access the default
// key equal, please use the container's ::key_equal alias instead.
//
// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
template <class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/hash_function_defaults.h"
#include <functional>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
namespace {
using ::testing::Types;
TEST(Eq, Int32) {
hash_default_eq<int32_t> eq;
EXPECT_TRUE(eq(1, 1u));
EXPECT_TRUE(eq(1, char{1}));
EXPECT_TRUE(eq(1, true));
EXPECT_TRUE(eq(1, double{1.1}));
EXPECT_FALSE(eq(1, char{2}));
EXPECT_FALSE(eq(1, 2u));
EXPECT_FALSE(eq(1, false));
EXPECT_FALSE(eq(1, 2.));
}
TEST(Hash, Int32) {
hash_default_hash<int32_t> hash;
auto h = hash(1);
EXPECT_EQ(h, hash(1u));
EXPECT_EQ(h, hash(char{1}));
EXPECT_EQ(h, hash(true));
EXPECT_EQ(h, hash(double{1.1}));
EXPECT_NE(h, hash(2u));
EXPECT_NE(h, hash(char{2}));
EXPECT_NE(h, hash(false));
EXPECT_NE(h, hash(2.));
}
enum class MyEnum { A, B, C, D };
TEST(Eq, Enum) {
hash_default_eq<MyEnum> eq;
EXPECT_TRUE(eq(MyEnum::A, MyEnum::A));
EXPECT_FALSE(eq(MyEnum::A, MyEnum::B));
}
TEST(Hash, Enum) {
hash_default_hash<MyEnum> hash;
for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) {
auto h = hash(e);
EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e)));
EXPECT_NE(h, hash(MyEnum::D));
}
}
using StringTypes = ::testing::Types<std::string, absl::string_view>;
template <class T>
struct EqString : ::testing::Test {
hash_default_eq<T> key_eq;
};
TYPED_TEST_CASE(EqString, StringTypes);
template <class T>
struct HashString : ::testing::Test {
hash_default_hash<T> hasher;
};
TYPED_TEST_CASE(HashString, StringTypes);
TYPED_TEST(EqString, Works) {
auto eq = this->key_eq;
EXPECT_TRUE(eq("a", "a"));
EXPECT_TRUE(eq("a", absl::string_view("a")));
EXPECT_TRUE(eq("a", std::string("a")));
EXPECT_FALSE(eq("a", "b"));
EXPECT_FALSE(eq("a", absl::string_view("b")));
EXPECT_FALSE(eq("a", std::string("b")));
}
TYPED_TEST(HashString, Works) {
auto hash = this->hasher;
auto h = hash("a");
EXPECT_EQ(h, hash(absl::string_view("a")));
EXPECT_EQ(h, hash(std::string("a")));
EXPECT_NE(h, hash(absl::string_view("b")));
EXPECT_NE(h, hash(std::string("b")));
}
struct NoDeleter {
template <class T>
void operator()(const T* ptr) const {}
};
using PointerTypes =
::testing::Types<const int*, int*, std::unique_ptr<const int>,
std::unique_ptr<const int, NoDeleter>,
std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>,
std::shared_ptr<const int>, std::shared_ptr<int>>;
template <class T>
struct EqPointer : ::testing::Test {
hash_default_eq<T> key_eq;
};
TYPED_TEST_CASE(EqPointer, PointerTypes);
template <class T>
struct HashPointer : ::testing::Test {
hash_default_hash<T> hasher;
};
TYPED_TEST_CASE(HashPointer, PointerTypes);
TYPED_TEST(EqPointer, Works) {
int dummy;
auto eq = this->key_eq;
auto sptr = std::make_shared<int>();
std::shared_ptr<const int> csptr = sptr;
int* ptr = sptr.get();
const int* cptr = ptr;
std::unique_ptr<int, NoDeleter> uptr(ptr);
std::unique_ptr<const int, NoDeleter> cuptr(ptr);
EXPECT_TRUE(eq(ptr, cptr));
EXPECT_TRUE(eq(ptr, sptr));
EXPECT_TRUE(eq(ptr, uptr));
EXPECT_TRUE(eq(ptr, csptr));
EXPECT_TRUE(eq(ptr, cuptr));
EXPECT_FALSE(eq(&dummy, cptr));
EXPECT_FALSE(eq(&dummy, sptr));
EXPECT_FALSE(eq(&dummy, uptr));
EXPECT_FALSE(eq(&dummy, csptr));
EXPECT_FALSE(eq(&dummy, cuptr));
}
TEST(Hash, DerivedAndBase) {
struct Base {};
struct Derived : Base {};
hash_default_hash<Base*> hasher;
Base base;
Derived derived;
EXPECT_NE(hasher(&base), hasher(&derived));
EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived));
auto dp = std::make_shared<Derived>();
EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp));
}
TEST(Hash, FunctionPointer) {
using Func = int (*)();
hash_default_hash<Func> hasher;
hash_default_eq<Func> eq;
Func p1 = [] { return 1; }, p2 = [] { return 2; };
EXPECT_EQ(hasher(p1), hasher(p1));
EXPECT_TRUE(eq(p1, p1));
EXPECT_NE(hasher(p1), hasher(p2));
EXPECT_FALSE(eq(p1, p2));
}
TYPED_TEST(HashPointer, Works) {
int dummy;
auto hash = this->hasher;
auto sptr = std::make_shared<int>();
std::shared_ptr<const int> csptr = sptr;
int* ptr = sptr.get();
const int* cptr = ptr;
std::unique_ptr<int, NoDeleter> uptr(ptr);
std::unique_ptr<const int, NoDeleter> cuptr(ptr);
EXPECT_EQ(hash(ptr), hash(cptr));
EXPECT_EQ(hash(ptr), hash(sptr));
EXPECT_EQ(hash(ptr), hash(uptr));
EXPECT_EQ(hash(ptr), hash(csptr));
EXPECT_EQ(hash(ptr), hash(cuptr));
EXPECT_NE(hash(&dummy), hash(cptr));
EXPECT_NE(hash(&dummy), hash(sptr));
EXPECT_NE(hash(&dummy), hash(uptr));
EXPECT_NE(hash(&dummy), hash(csptr));
EXPECT_NE(hash(&dummy), hash(cuptr));
}
// Cartesian product of (std::string, absl::string_view)
// with (std::string, absl::string_view, const char*).
using StringTypesCartesianProduct = Types<
// clang-format off
std::pair<std::string, std::string>,
std::pair<std::string, absl::string_view>,
std::pair<std::string, const char*>,
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
std::pair<absl::string_view, const char*>>;
// clang-format on
constexpr char kFirstString[] = "abc123";
constexpr char kSecondString[] = "ijk456";
template <typename T>
struct StringLikeTest : public ::testing::Test {
typename T::first_type a1{kFirstString};
typename T::second_type b1{kFirstString};
typename T::first_type a2{kSecondString};
typename T::second_type b2{kSecondString};
hash_default_eq<typename T::first_type> eq;
hash_default_hash<typename T::first_type> hash;
};
TYPED_TEST_CASE_P(StringLikeTest);
TYPED_TEST_P(StringLikeTest, Eq) {
EXPECT_TRUE(this->eq(this->a1, this->b1));
EXPECT_TRUE(this->eq(this->b1, this->a1));
}
TYPED_TEST_P(StringLikeTest, NotEq) {
EXPECT_FALSE(this->eq(this->a1, this->b2));
EXPECT_FALSE(this->eq(this->b2, this->a1));
}
TYPED_TEST_P(StringLikeTest, HashEq) {
EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
// It would be a poor hash function which collides on these strings.
EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
}
TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct);
} // namespace
} // namespace container_internal
} // namespace absl
enum Hash : size_t {
kStd = 0x2, // std::hash
#ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER
kExtension = 0x4, // ::hash (GCC extension)
#endif // _MSC_VER
};
// H is a bitmask of Hash enumerations.
// Hashable<H> is hashable via all means specified in H.
template <int H>
struct Hashable {
static constexpr bool HashableBy(Hash h) { return h & H; }
};
namespace std {
template <int H>
struct hash<Hashable<H>> {
template <class E = Hashable<H>,
class = typename std::enable_if<E::HashableBy(kStd)>::type>
size_t operator()(E) const {
return kStd;
}
};
} // namespace std
namespace absl {
namespace container_internal {
namespace {
template <class T>
size_t Hash(const T& v) {
return hash_default_hash<T>()(v);
}
TEST(Delegate, HashDispatch) {
EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>()));
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/hash_generator_testing.h"
#include <deque>
namespace absl {
namespace container_internal {
namespace hash_internal {
namespace {
class RandomDeviceSeedSeq {
public:
using result_type = typename std::random_device::result_type;
template <class Iterator>
void generate(Iterator start, Iterator end) {
while (start != end) {
*start = gen_();
++start;
}
}
private:
std::random_device gen_;
};
} // namespace
std::mt19937_64* GetThreadLocalRng() {
RandomDeviceSeedSeq seed_seq;
thread_local auto* rng = new std::mt19937_64(seed_seq);
return rng;
}
std::string Generator<std::string>::operator()() const {
// NOLINTNEXTLINE(runtime/int)
std::uniform_int_distribution<short> chars(0x20, 0x7E);
std::string res;
res.resize(32);
std::generate(res.begin(), res.end(),
[&]() { return chars(*GetThreadLocalRng()); });
return res;
}
absl::string_view Generator<absl::string_view>::operator()() const {
static auto* arena = new std::deque<std::string>();
// NOLINTNEXTLINE(runtime/int)
std::uniform_int_distribution<short> chars(0x20, 0x7E);
arena->emplace_back();
auto& res = arena->back();
res.resize(32);
std::generate(res.begin(), res.end(),
[&]() { return chars(*GetThreadLocalRng()); });
return res;
}
} // namespace hash_internal
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Generates random values for testing. Specialized only for the few types we
// care about.
#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
#include <stdint.h>
#include <algorithm>
#include <iosfwd>
#include <random>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
namespace hash_internal {
namespace generator_internal {
template <class Container, class = void>
struct IsMap : std::false_type {};
template <class Map>
struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
} // namespace generator_internal
std::mt19937_64* GetThreadLocalRng();
enum Enum {
kEnumEmpty,
kEnumDeleted,
};
enum class EnumClass : uint64_t {
kEmpty,
kDeleted,
};
inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
return o << static_cast<uint64_t>(ec);
}
template <class T, class E = void>
struct Generator;
template <class T>
struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
T operator()() const {
std::uniform_int_distribution<T> dist;
return dist(*GetThreadLocalRng());
}
};
template <>
struct Generator<Enum> {
Enum operator()() const {
std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
dist;
while (true) {
auto variate = dist(*GetThreadLocalRng());
if (variate != kEnumEmpty && variate != kEnumDeleted)
return static_cast<Enum>(variate);
}
}
};
template <>
struct Generator<EnumClass> {
EnumClass operator()() const {
std::uniform_int_distribution<
typename std::underlying_type<EnumClass>::type>
dist;
while (true) {
EnumClass variate = static_cast<EnumClass>(dist(*GetThreadLocalRng()));
if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
        return variate;
}
}
};
template <>
struct Generator<std::string> {
std::string operator()() const;
};
template <>
struct Generator<absl::string_view> {
absl::string_view operator()() const;
};
template <>
struct Generator<NonStandardLayout> {
NonStandardLayout operator()() const {
return NonStandardLayout(Generator<std::string>()());
}
};
template <class K, class V>
struct Generator<std::pair<K, V>> {
std::pair<K, V> operator()() const {
return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
Generator<typename std::decay<V>::type>()());
}
};
template <class... Ts>
struct Generator<std::tuple<Ts...>> {
std::tuple<Ts...> operator()() const {
return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
}
};
template <class U>
struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
decltype(std::declval<U&>().value())>>
: Generator<std::pair<
typename std::decay<decltype(std::declval<U&>().key())>::type,
typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
template <class Container>
using GeneratedType = decltype(
std::declval<const Generator<
typename std::conditional<generator_internal::IsMap<Container>::value,
typename Container::value_type,
typename Container::key_type>::type>&>()());
} // namespace hash_internal
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Utilities to help tests verify that hash tables properly handle stateful
// allocators and hash functions.
#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
#include <cstdlib>
#include <limits>
#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
namespace hash_testing_internal {
template <class Derived>
struct WithId {
WithId() : id_(next_id<Derived>()) {}
WithId(const WithId& that) : id_(that.id_) {}
WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
WithId& operator=(const WithId& that) {
id_ = that.id_;
return *this;
}
WithId& operator=(WithId&& that) {
id_ = that.id_;
that.id_ = 0;
return *this;
}
size_t id() const { return id_; }
friend bool operator==(const WithId& a, const WithId& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
protected:
explicit WithId(size_t id) : id_(id) {}
private:
size_t id_;
template <class T>
static size_t next_id() {
    // 0 is reserved for the moved-from state.
static size_t gId = 1;
return gId++;
}
};
} // namespace hash_testing_internal
struct NonStandardLayout {
NonStandardLayout() {}
explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
virtual ~NonStandardLayout() {}
friend bool operator==(const NonStandardLayout& a,
const NonStandardLayout& b) {
return a.value == b.value;
}
friend bool operator!=(const NonStandardLayout& a,
const NonStandardLayout& b) {
return a.value != b.value;
}
template <typename H>
friend H AbslHashValue(H h, const NonStandardLayout& v) {
return H::combine(std::move(h), v.value);
}
std::string value;
};
struct StatefulTestingHash
: absl::container_internal::hash_testing_internal::WithId<
StatefulTestingHash> {
template <class T>
size_t operator()(const T& t) const {
return absl::Hash<T>{}(t);
}
};
struct StatefulTestingEqual
: absl::container_internal::hash_testing_internal::WithId<
StatefulTestingEqual> {
template <class T, class U>
bool operator()(const T& t, const U& u) const {
return t == u;
}
};
// It is expected that Alloc() == Alloc() for all allocators, so we cannot use
// the WithId base. We need to assign ids explicitly.
template <class T = int>
struct Alloc : std::allocator<T> {
using propagate_on_container_swap = std::true_type;
// Using old paradigm for this to ensure compatibility.
explicit Alloc(size_t id = 0) : id_(id) {}
Alloc(const Alloc&) = default;
Alloc& operator=(const Alloc&) = default;
template <class U>
Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
template <class U>
struct rebind {
using other = Alloc<U>;
};
size_t id() const { return id_; }
friend bool operator==(const Alloc& a, const Alloc& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
private:
size_t id_ = std::numeric_limits<size_t>::max();
};
template <class Map>
auto items(const Map& m) -> std::vector<
std::pair<typename Map::key_type, typename Map::mapped_type>> {
using std::get;
std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
res.reserve(m.size());
for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
return res;
}
template <class Set>
auto keys(const Set& s)
-> std::vector<typename std::decay<typename Set::key_type>::type> {
std::vector<typename std::decay<typename Set::key_type>::type> res;
res.reserve(s.size());
for (const auto& v : s) res.emplace_back(v);
return res;
}
} // namespace container_internal
} // namespace absl
// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
// where the unordered containers are missing certain constructors that
// take allocator arguments. This test is defined ad hoc for the platforms
// we care about (notably Crosstool 17) because libstdcxx's useless
// versioning scheme precludes a more principled solution.
#if defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425
#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
#else
#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
#endif
#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/hash_policy_testing.h"
#include "gtest/gtest.h"
namespace absl {
namespace container_internal {
namespace {
TEST(_, Hash) {
StatefulTestingHash h1;
EXPECT_EQ(1, h1.id());
StatefulTestingHash h2;
EXPECT_EQ(2, h2.id());
StatefulTestingHash h1c(h1);
EXPECT_EQ(1, h1c.id());
StatefulTestingHash h2m(std::move(h2));
EXPECT_EQ(2, h2m.id());
EXPECT_EQ(0, h2.id());
StatefulTestingHash h3;
EXPECT_EQ(3, h3.id());
h3 = StatefulTestingHash();
EXPECT_EQ(4, h3.id());
h3 = std::move(h1);
EXPECT_EQ(1, h3.id());
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
namespace absl {
namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
private:
struct ReturnKey {
    // We return `Key` here.
    // When Key=T&, we forward the lvalue reference.
    // When Key=T, we return by value to avoid a dangling reference,
    // e.g., for string_hash_map.
template <class Key, class... Args>
Key operator()(Key&& k, const Args&...) const {
return std::forward<Key>(k);
}
};
template <class P = Policy, class = void>
struct ConstantIteratorsImpl : std::false_type {};
template <class P>
struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
: P::constant_iterators {};
public:
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;
  // The argument type for insertions into the hashtable. This may differ from
  // value_type for performance reasons; see the initializer_list constructor
  // and the insert() member functions for more details.
using init_type = typename Policy::init_type;
using reference = decltype(Policy::element(std::declval<slot_type*>()));
using pointer = typename std::remove_reference<reference>::type*;
using value_type = typename std::remove_reference<reference>::type;
// Policies can set this variable to tell raw_hash_set that all iterators
// should be constant, even `iterator`. This is useful for set-like
// containers.
// Defaults to false if not provided by the policy.
using constant_iterators = ConstantIteratorsImpl<>;
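  // Illustrative sketch: a set-like policy opts in by declaring the member
  // type (the policy name here is hypothetical):
  //
  //   struct MySetPolicy {
  //     using constant_iterators = std::true_type;
  //     // ...
  //   };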
// PRECONDITION: `slot` is UNINITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template <class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
Policy::construct(alloc, slot, std::forward<Args>(args)...);
}
// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is UNINITIALIZED
template <class Alloc>
static void destroy(Alloc* alloc, slot_type* slot) {
Policy::destroy(alloc, slot);
}
  // Transfers `old_slot` to `new_slot`. Any memory allocated by the allocator
  // inside `old_slot` may be transferred to `new_slot` rather than copied.
//
// OPTIONAL: defaults to:
//
// clone(new_slot, std::move(*old_slot));
// destroy(old_slot);
//
// PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
// POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
// UNINITIALIZED
template <class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
transfer_impl(alloc, new_slot, old_slot, 0);
}
// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template <class P = Policy>
static auto element(slot_type* slot) -> decltype(P::element(slot)) {
return P::element(slot);
}
// Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
//
// If `slot` is nullptr, returns the constant amount of memory owned by any
// full slot or -1 if slots own variable amounts of memory.
//
// PRECONDITION: `slot` is INITIALIZED or nullptr
template <class P = Policy>
static size_t space_used(const slot_type* slot) {
return P::space_used(slot);
}
// Provides generalized access to the key for elements, both for elements in
// the table and for elements that have not yet been inserted (or even
// constructed). We would like an API that allows us to say: `key(args...)`
// but we cannot do that for all cases, so we use this more general API that
// can be used for many things, including the following:
//
// - Given an element in a table, get its key.
// - Given an element initializer, get its key.
// - Given `emplace()` arguments, get the element key.
//
// Implementations of this must adhere to a very strict technical
// specification around aliasing and consuming arguments:
//
// Let `value_type` be the result type of `element()` without ref- and
// cv-qualifiers. The first argument is a functor, the rest are constructor
// arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
// `k` is the element key, and `xs...` are the new constructor arguments for
// `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
// `ts...`. The key won't be touched once `xs...` are used to construct an
// element; `ts...` won't be touched at all, which allows `apply()` to consume
// any rvalues among them.
//
// If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
// trigger a hard compile error unless it originates from `f`. In other words,
// `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
// constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
//
// If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
// `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
template <class F, class... Ts, class P = Policy>
static auto apply(F&& f, Ts&&... ts)
-> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
}
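  // Illustrative sketch of a conforming set-policy `apply()`, delegating to
  // DecomposeValue() from container_memory.h (the surrounding policy is
  // assumed, not shown):
  //
  //   template <class F, class... Args>
  //   static decltype(DecomposeValue(std::declval<F>(),
  //                                  std::declval<Args>()...))
  //   apply(F&& f, Args&&... args) {
  //     return DecomposeValue(std::forward<F>(f), std::forward<Args>(args)...);
  //   }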
// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
static auto key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}
// Returns the "value" (as opposed to the "key") portion of the element. Used
// by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
template <class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}
private:
// Use auto -> decltype as an enabler.
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, int)
-> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
P::transfer(alloc, new_slot, old_slot);
}
template <class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, char) {
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/hash_policy_traits.h"
#include <functional>
#include <memory>
#include <new>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace absl {
namespace container_internal {
namespace {
using ::testing::MockFunction;
using ::testing::Return;
using ::testing::ReturnRef;
using Alloc = std::allocator<int>;
using Slot = int;
struct PolicyWithoutOptionalOps {
using slot_type = Slot;
using key_type = Slot;
using init_type = Slot;
static std::function<void(void*, Slot*, Slot)> construct;
static std::function<void(void*, Slot*)> destroy;
static std::function<Slot&(Slot*)> element;
static int apply(int v) { return apply_impl(v); }
static std::function<int(int)> apply_impl;
static std::function<Slot&(Slot*)> value;
};
std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
static std::function<void(void*, Slot*, Slot*)> transfer;
};
std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
struct Test : ::testing::Test {
Test() {
PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
construct.Call(a1, a2, std::move(a3));
};
PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
destroy.Call(a1, a2);
};
PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
return element.Call(a1);
};
PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
return apply.Call(a1);
};
PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
return value.Call(a1);
};
PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
return transfer.Call(a1, a2, a3);
};
}
std::allocator<int> alloc;
int a = 53;
MockFunction<void(void*, Slot*, Slot)> construct;
MockFunction<void(void*, Slot*)> destroy;
MockFunction<Slot&(Slot*)> element;
MockFunction<int(int)> apply;
MockFunction<Slot&(Slot*)> value;
MockFunction<void(void*, Slot*, Slot*)> transfer;
};
TEST_F(Test, construct) {
EXPECT_CALL(construct, Call(&alloc, &a, 53));
hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
}
TEST_F(Test, destroy) {
EXPECT_CALL(destroy, Call(&alloc, &a));
hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
}
TEST_F(Test, element) {
int b = 0;
EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a));
}
TEST_F(Test, apply) {
EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
}
TEST_F(Test, value) {
int b = 0;
EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b));
EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
}
TEST_F(Test, without_transfer) {
int b = 42;
EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b));
EXPECT_CALL(construct, Call(&alloc, &a, b));
EXPECT_CALL(destroy, Call(&alloc, &b));
hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
}
TEST_F(Test, with_transfer) {
int b = 42;
EXPECT_CALL(transfer, Call(&alloc, &a, &b));
hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This library provides APIs to debug the probing behavior of hash tables.
//
// In general, the probing behavior is a black box for users and only the
// side effects can be measured in the form of performance differences.
// These APIs give a glimpse into the actual behavior of the probing algorithms
// in these hashtables given a specified hash function and a set of elements.
//
// The probe count distribution can be used to assess the quality of the hash
// function for that particular hash table. Note that a hash function that
// performs well in one hash table implementation does not necessarily perform
// well in a different one.
//
// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
// absl::{flat,node,string}_hash_{set,map}.
#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
#include <cstddef>
#include <algorithm>
#include <type_traits>
#include <vector>
#include "absl/container/internal/hashtable_debug_hooks.h"
namespace absl {
namespace container_internal {
// Returns the number of probes required to look up `key`. Returns 0 for a
// search with no collisions. Higher values mean more hash collisions occurred;
// however, the exact meaning of this number varies according to the container
// type.
template <typename C>
size_t GetHashtableDebugNumProbes(
const C& c, const typename C::key_type& key) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::GetNumProbes(c, key);
}
// Gets a histogram of the number of probes for each element in the container.
// The sum of all the values in the vector is equal to container.size().
template <typename C>
std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
std::vector<size_t> v;
for (auto it = container.begin(); it != container.end(); ++it) {
size_t num_probes = GetHashtableDebugNumProbes(
container,
absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
v.resize(std::max(v.size(), num_probes + 1));
v[num_probes]++;
}
return v;
}
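// Illustrative usage sketch: hist[k] is the number of elements that required k
// probes to be found.
//
//   absl::flat_hash_set<int> s = {1, 2, 3};
//   std::vector<size_t> hist = GetHashtableDebugNumProbesHistogram(s);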
struct HashtableDebugProbeSummary {
size_t total_elements;
size_t total_num_probes;
double mean;
};
// Gets a summary of the probe count distribution for the elements in the
// container.
template <typename C>
HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
auto probes = GetHashtableDebugNumProbesHistogram(container);
HashtableDebugProbeSummary summary = {};
for (size_t i = 0; i < probes.size(); ++i) {
summary.total_elements += probes[i];
summary.total_num_probes += probes[i] * i;
}
summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
return summary;
}
// Returns the number of bytes requested from the allocator by the container
// and not freed.
template <typename C>
size_t AllocatedByteSize(const C& c) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::AllocatedByteSize(c);
}
// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
// and `c.size()` is equal to `num_elements`.
template <typename C>
size_t LowerBoundAllocatedByteSize(size_t num_elements) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
}
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Provides the internal API for hashtable_debug.h.
#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
#include <cstddef>
#include <algorithm>
#include <type_traits>
#include <vector>
namespace absl {
namespace container_internal {
namespace hashtable_debug_internal {
// If it is a map, call get<0>().
using std::get;
template <typename T, typename = typename T::mapped_type>
auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
return get<0>(pair);
}
// If it is not a map, return the value directly.
template <typename T>
const typename T::key_type& GetKey(const typename T::key_type& key, char) {
return key;
}
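// Note on overload resolution: callers pass a literal 0 as the second
// argument. When `T::mapped_type` exists, the `int` overload above (the map
// version) is an exact match and wins; otherwise SFINAE removes it and the
// `char` overload (the set version) is selected via the int-to-char
// conversion.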
// Containers should specialize this to provide debug information for that
// container.
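//
// A hedged sketch of a specialization for a hypothetical container `MyTable`
// (both the container and its `CountProbesFor` debug hook are assumptions,
// not part of this library):
//
//   template <>
//   struct HashtableDebugAccess<MyTable> {
//     static size_t GetNumProbes(const MyTable& c,
//                                const MyTable::key_type& key) {
//       return c.CountProbesFor(key);
//     }
//   };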
template <class Container, typename Enabler = void>
struct HashtableDebugAccess {
// Returns the number of probes required to find `key` in `c`. The "number of
// probes" is a concept that can vary by container. Implementations should
// return 0 when `key` was found in the minimum number of operations and
// should increment the result for each non-trivial operation required to find
// `key`.
//
// The default implementation uses the bucket API from the standard library
// and thus works for `std::unordered_*` containers.
static size_t GetNumProbes(const Container& c,
const typename Container::key_type& key) {
if (!c.bucket_count()) return {};
size_t num_probes = 0;
size_t bucket = c.bucket(key);
for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
if (it == e) return num_probes;
if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
}
}
// Returns the number of bytes requested from the allocator by the container
// and not freed.
//
// static size_t AllocatedByteSize(const Container& c);
// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
// `Container` and `c.size()` is equal to `num_elements`.
//
// static size_t LowerBoundAllocatedByteSize(size_t num_elements);
};
} // namespace hashtable_debug_internal
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// MOTIVATION AND TUTORIAL
//
// If you want to put in a single heap allocation N doubles followed by M ints,
// it's easy if N and M are known at compile time.
//
// struct S {
// double a[N];
// int b[M];
// };
//
// S* p = new S;
//
// But what if N and M are known only at run time? Class template Layout to the
// rescue! It's a portable generalization of the technique known as the
// "struct hack".
//
// // This object will tell us everything we need to know about the memory
// // layout of double[N] followed by int[M]. It's structurally identical to
// // size_t[2] that stores N and M. It's very cheap to create.
// const Layout<double, int> layout(N, M);
//
// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
// // memory is needed. We are free to use any allocation function we want as
// // long as it returns aligned memory.
// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
//
// // Obtain the pointer to the array of doubles.
// // Equivalent to `reinterpret_cast<double*>(p.get())`.
// //
// // We could have written layout.Pointer<0>(p.get()) instead. If all the
// // types are unique you can use either form, but if some types are repeated
// // you must use the index form.
// double* a = layout.Pointer<double>(p.get());
//
// // Obtain the pointer to the array of ints.
// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
// int* b = layout.Pointer<int>(p.get());
//
// If we are unable to specify the sizes of all fields, we can pass as many
// leading sizes as we know to `Partial()`. In return, it'll allow us to access
// the fields whose locations and sizes can be computed from the provided
// information.
// `Partial()` comes in handy when the array sizes are embedded into the
// allocation.
//
// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
// using L = Layout<size_t, size_t, double, int>;
//
// unsigned char* Allocate(size_t n, size_t m) {
// const L layout(1, 1, n, m);
// unsigned char* p = new unsigned char[layout.AllocSize()];
// *layout.Pointer<0>(p) = n;
// *layout.Pointer<1>(p) = m;
// return p;
// }
//
// void Use(unsigned char* p) {
// // First, extract N and M.
// // Specify that the first array has only one element. Using `prefix` we
// // can access the first two arrays but not more.
// constexpr auto prefix = L::Partial(1);
// size_t n = *prefix.Pointer<0>(p);
// size_t m = *prefix.Pointer<1>(p);
//
// // Now we can get pointers to the payload.
// const L layout(1, 1, n, m);
// double* a = layout.Pointer<double>(p);
// int* b = layout.Pointer<int>(p);
// }
//
// The layout we used above combines fixed-size with dynamically-sized fields.
// This is quite common. Layout is optimized for this use case and generates
// optimal code. All computations that can be performed at compile time are
// indeed performed at compile time.
//
// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
// padding in between arrays.
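//
// For instance (a worked sketch, assuming sizeof(int32_t) == 4): with one
// element of each type, `Layout<int32_t, int8_t>` needs AllocSize() == 5 and
// has no padding, while the reversed `Layout<int8_t, int32_t>` needs 8,
// because the int32_t array must start at offset 4, leaving 3 padding bytes
// after the int8_t.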
//
// You can manually override the alignment of an array by wrapping the type in
// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
// and behavior as `Layout<..., T, ...>` except that the first element of the
// array of `T` is aligned to `N` (the rest of the elements follow without
// padding). `N` cannot be less than `alignof(T)`.
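//
// For example (a sketch; 64 here is an assumed cache-line size):
//
// // One header byte followed by n floats starting on a cache-line boundary.
// Layout<int8_t, Aligned<float, 64>> layout(1, n);
// // The floats begin at offset 64 instead of offset 4.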
//
// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
// memory layouts. Check out the reference or code below to discover more.
//
// EXAMPLE
//
// // Immutable move-only string with sizeof equal to sizeof(void*). The
// // string size and the characters are kept in the same heap allocation.
// class CompactString {
// public:
// CompactString(const char* s = "") {
// const size_t size = strlen(s);
// // size_t[1] followed by char[size + 1].
// const L layout(1, size + 1);
// p_.reset(new unsigned char[layout.AllocSize()]);
// // If running under ASAN, mark the padding bytes, if any, to catch
// // memory errors.
// layout.PoisonPadding(p_.get());
// // Store the size in the allocation.
// *layout.Pointer<size_t>(p_.get()) = size;
// // Store the characters in the allocation.
// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
// }
//
// size_t size() const {
//     // Equivalent to reinterpret_cast<size_t&>(*p_).
// return *L::Partial().Pointer<size_t>(p_.get());
// }
//
// const char* c_str() const {
//     // Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
// // The argument in Partial(1) specifies that we have size_t[1] in front
// // of the characters.
// return L::Partial(1).Pointer<char>(p_.get());
// }
//
// private:
// // Our heap allocation contains a size_t followed by an array of chars.
// using L = Layout<size_t, char>;
// std::unique_ptr<unsigned char[]> p_;
// };
//
// int main() {
// CompactString s = "hello";
// assert(s.size() == 5);
// assert(strcmp(s.c_str(), "hello") == 0);
// }
//
// DOCUMENTATION
//
// The interface exported by this file consists of:
// - class `Layout<>` and its public members.
// - The public members of class `internal_layout::LayoutImpl<>`. That class
// isn't intended to be used directly, and its name and template parameter
// list are internal implementation details, but the class itself provides
// most of the functionality in this file. See comments on its members for
// detailed documentation.
//
// `Layout<T1, ..., Tn>::Partial(count1, ..., countm)` (where `m` <= `n`)
// returns a `LayoutImpl<>` object. `Layout<T1, ..., Tn> layout(count1, ..., countn)`
// creates a `Layout` object, which exposes the same functionality by inheriting
// from `LayoutImpl<>`.
#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <utility>
#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace absl {
namespace container_internal {
// A type wrapper that instructs `Layout` to use the specified alignment for the
// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
// and behavior as `Layout<..., T, ...>` except that the first element of the
// array of `T` is aligned to `N` (the rest of the elements follow without
// padding).
//
// Requires: `N >= alignof(T)` and `N` is a power of 2.
template <class T, size_t N>
struct Aligned;
namespace internal_layout {
template <class T>
struct NotAligned {};
template <class T, size_t N>
struct NotAligned<const Aligned<T, N>> {
static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
};
template <size_t>
using IntToSize = size_t;
template <class>
using TypeToSize = size_t;
template <class T>
struct Type : NotAligned<T> {
using type = T;
};
template <class T, size_t N>
struct Type<Aligned<T, N>> {
using type = T;
};
template <class T>
struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
template <class T, size_t N>
struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
template <class T>
struct AlignOf : NotAligned<T>, std::integral_constant<size_t, alignof(T)> {};
template <class T, size_t N>
struct AlignOf<Aligned<T, N>> : std::integral_constant<size_t, N> {
static_assert(N % alignof(T) == 0,
"Custom alignment can't be lower than the type's alignment");
};
// Does `Ts...` contain `T`?
template <class T, class... Ts>
using Contains = absl::disjunction<std::is_same<T, Ts>...>;
template <class From, class To>
using CopyConst =
typename std::conditional<std::is_const<From>::value, const To, To>::type;
template <class T>
using SliceType = absl::Span<T>;
// This namespace contains no types. It prevents functions defined in it from
// being found by ADL.
namespace adl_barrier {
template <class Needle, class... Ts>
constexpr size_t Find(Needle, Needle, Ts...) {
static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
return 0;
}
template <class Needle, class T, class... Ts>
constexpr size_t Find(Needle, T, Ts...) {
return adl_barrier::Find(Needle(), Ts()...) + 1;
}
constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
// Returns `q * m` for the smallest `q` such that `q * m >= n`.
// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
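// For example, Align(13, 8) == (13 + 7) & ~7 == 16.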
constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
constexpr size_t Max(size_t a) { return a; }
template <class... Ts>
constexpr size_t Max(size_t a, size_t b, Ts... rest) {
return adl_barrier::Max(b < a ? a : b, rest...);
}
template <class T>
std::string TypeName() {
std::string out;
int status = 0;
char* demangled = nullptr;
#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
#endif
if (status == 0 && demangled != nullptr) {  // Demangling succeeded.
absl::StrAppend(&out, "<", demangled, ">");
free(demangled);
} else {
#if defined(__GXX_RTTI) || defined(_CPPRTTI)
absl::StrAppend(&out, "<", typeid(T).name(), ">");
#endif
}
return out;
}
} // namespace adl_barrier
template <bool C>
using EnableIf = typename std::enable_if<C, int>::type;
// Can `T` be a template argument of `Layout`?
template <class T>
using IsLegalElementType = std::integral_constant<
bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
!std::is_reference<typename Type<T>::type>::value &&
!std::is_volatile<typename Type<T>::type>::value &&
adl_barrier::IsPow2(AlignOf<T>::value)>;
template <class Elements, class SizeSeq, class OffsetSeq>
class LayoutImpl;
// Public base class of `Layout` and the result type of `Layout::Partial()`.
//
// `Elements...` contains all template arguments of `Layout` that created this
// instance.
//
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
// passed to `Layout::Partial()` or `Layout::Layout()`.
//
// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
// can compute offsets).
template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>> {
private:
static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
"Invalid element type (see IsLegalElementType)");
enum {
NumTypes = sizeof...(Elements),
NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq),
};
// These are guaranteed by `Layout`.
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error");
static_assert(NumTypes > 0, "Internal error");
// Returns the index of `T` in `Elements...`. Results in a compilation error
// if `Elements...` doesn't contain exactly one instance of `T`.
template <class T>
static constexpr size_t ElementIndex() {
static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
"Type not found");
return adl_barrier::Find(Type<T>(),
Type<typename Type<Elements>::type>()...);
}
template <size_t N>
using ElementAlignment =
AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
public:
// Element types of all arrays packed in a tuple.
using ElementTypes = std::tuple<typename Type<Elements>::type...>;
// Element type of the Nth array.
template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type;
constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
: size_{sizes...} {}
// Alignment of the layout, equal to the strictest alignment of all elements.
// All pointers passed to the methods of layout must be aligned to this value.
static constexpr size_t Alignment() {
return adl_barrier::Max(AlignOf<Elements>::value...);
}
// Offset in bytes of the Nth array.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Offset<0>() == 0);   // The ints start at offset 0.
// assert(x.Offset<1>() == 16);  // The doubles start at offset 16.
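//
// (Here Offset<1>() == Align(3 * sizeof(int), alignof(double)) ==
// Align(12, 8) == 16, assuming sizeof(int) == 4 and alignof(double) == 8.)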
//
// Requires: `N <= NumSizes && N < sizeof...(Ts)`.
template <size_t N, EnableIf<N == 0> = 0>
constexpr size_t Offset() const {
return 0;
}
template <size_t N, EnableIf<N != 0> = 0>
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
ElementAlignment<N>());
}
// Offset in bytes of the array with the specified element type. There must
// be exactly one such array and its zero-based index must be at most
// `NumSizes`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Offset<int>() == 0);      // The ints start at offset 0.
// assert(x.Offset<double>() == 16);  // The doubles start at offset 16.
template <class T>
constexpr size_t Offset() const {
return Offset<ElementIndex<T>()>();
}
// Offsets in bytes of all arrays for which the offsets are known.
constexpr std::array<size_t, NumOffsets> Offsets() const {
return {{Offset<OffsetSeq>()...}};
}
// The number of elements in the Nth array. This is the Nth argument of
// `Layout::Partial()` or `Layout::Layout()` (zero-based).
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Size<0>() == 3);
// assert(x.Size<1>() == 4);
//
// Requires: `N < NumSizes`.
template <size_t N>
constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds");
return size_[N];
}
// The number of elements in the array with the specified element type.
// There must be exactly one such array and its zero-based index must be
// at most `NumSizes`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// assert(x.Size<int>() == 3);
// assert(x.Size<double>() == 4);
template <class T>
constexpr size_t Size() const {
return Size<ElementIndex<T>()>();
}
// The number of elements of all arrays for which they are known.
constexpr std::array<size_t, NumSizes> Sizes() const {
return {{Size<SizeSeq>()...}};
}
// Pointer to the beginning of the Nth array.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// int* ints = x.Pointer<0>(p);
// double* doubles = x.Pointer<1>(p);
//
// Requires: `N <= NumSizes && N < sizeof...(Ts)`.
// Requires: `p` is aligned to `Alignment()`.
template <size_t N, class Char>
CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
using C = typename std::remove_const<Char>::type;
static_assert(
std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
std::is_same<C, signed char>(),
"The argument must be a pointer to [const] [signed|unsigned] char");
constexpr size_t alignment = Alignment();
(void)alignment;
assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
}
// Pointer to the beginning of the array with the specified element type.
// There must be exactly one such array and its zero-based index must be at
// most `NumSizes`.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// int* ints = x.Pointer<int>(p);
// double* doubles = x.Pointer<double>(p);
//
// Requires: `p` is aligned to `Alignment()`.
template <class T, class Char>
CopyConst<Char, T>* Pointer(Char* p) const {
return Pointer<ElementIndex<T>()>(p);
}
// Pointers to all arrays for which pointers are known.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
//
// int* ints;
// double* doubles;
// std::tie(ints, doubles) = x.Pointers(p);
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
template <class Char>
std::tuple<CopyConst<
Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
Pointers(Char* p) const {
return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
Pointer<OffsetSeq>(p)...);
}
// The Nth array.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// Span<int> ints = x.Slice<0>(p);
// Span<double> doubles = x.Slice<1>(p);
//
// Requires: `N < NumSizes`.
// Requires: `p` is aligned to `Alignment()`.
template <size_t N, class Char>
SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
}
// The array with the specified element type. There must be exactly one
// such array and its zero-based index must be less than `NumSizes`.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
// Span<int> ints = x.Slice<int>(p);
// Span<double> doubles = x.Slice<double>(p);
//
// Requires: `p` is aligned to `Alignment()`.
template <class T, class Char>
SliceType<CopyConst<Char, T>> Slice(Char* p) const {
return Slice<ElementIndex<T>()>(p);
}
// All arrays with known sizes.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()];
//
// Span<int> ints;
// Span<double> doubles;
// std::tie(ints, doubles) = x.Slices(p);
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
template <class Char>
std::tuple<SliceType<CopyConst<
Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
Slices(Char* p) const {
// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
// in 6.1).
(void)p;
return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
Slice<SizeSeq>(p)...);
}
// The size of the allocation that fits all arrays.
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
// unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
//
// Requires: `NumSizes == sizeof...(Ts)`.
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
}
// If built with --config=asan, poisons padding bytes (if any) in the
// allocation. The pointer must point to a memory block at least
// `AllocSize()` bytes in length.
//
// `Char` must be `[const] [signed|unsigned] char`.
//
// Requires: `p` is aligned to `Alignment()`.
template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
void PoisonPadding(const Char* p) const {
Pointer<0>(p); // verify the requirements on `Char` and `p`
}
template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
#ifdef ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behavior.
if (ElementAlignment<N - 1>() % ElementAlignment<N>()) {
size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
}
// Human-readable description of the memory layout. Useful for debugging.
// Slow.
//
// // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
// // by an unknown number of doubles.
// auto x = Layout<char, int, double>::Partial(5, 3);
// assert(x.DebugString() ==
// "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
//
// Each field is in the following format: @offset<type>(sizeof)[size] (<type>
// may be missing depending on the target platform). For example,
// @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
// int is 4 bytes, and we have 3 of those ints. The size of the last field may
// be missing (as in the example above). Only fields with known offsets are
// described. Type names may differ across platforms: one compiler might
// produce "unsigned*" where another produces "unsigned int *".
std::string DebugString() const {
const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
const std::string types[] = {adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
"(", sizes[i + 1], ")");
}
// NumSizes is a constant that may be zero. Some compilers cannot see that
// inside the if statement "size_[NumSizes - 1]" must be valid.
int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) {
absl::StrAppend(&res, "[", size_[last], "]");
}
return res;
}
private:
// Arguments of `Layout::Partial()` or `Layout::Layout()`.
size_t size_[NumSizes > 0 ? NumSizes : 1];
};
template <size_t NumSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
} // namespace internal_layout
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutImpl above. The type is
// internal to the library but its methods are public, and they are inherited
// by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
public:
static_assert(sizeof...(Ts) > 0, "At least one field is required");
static_assert(
absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
"Invalid element type (see IsLegalElementType)");
// The result type of `Partial()` with `NumSizes` arguments.
template <size_t NumSizes>
using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
// `Layout` knows the element types of the arrays we want to lay out in
// memory but not the number of elements in each array.
// `Partial(size1, ..., sizeN)` allows us to specify the latter. The
// resulting immutable object can be used to obtain pointers to the
// individual arrays.
//
// It's allowed to pass fewer array sizes than the number of arrays. E.g.,
// if all you need is the offset of the second array, you only need to
// pass one argument -- the number of elements in the first array.
//
// // int[3] followed by 4 bytes of padding and an unknown number of
// // doubles.
// auto x = Layout<int, double>::Partial(3);
// // doubles start at byte 16.
// assert(x.Offset<1>() == 16);
//
// If you know the number of elements in all arrays, you can still call
// `Partial()` but it's more convenient to use the constructor of `Layout`.
//
// Layout<int, double> x(3, 5);
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
//
// Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
// Requires: all arguments are convertible to `size_t`.
template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
}
// Creates a layout with the sizes of all arrays specified. If you know
// only the sizes of the first N arrays (where N can be zero), you can use
// `Partial()` defined above. The constructor is essentially equivalent to
// calling `Partial()` and passing in all array sizes; it is provided as a
// convenient abbreviation.
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/layout.h"
// We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t
#include <stddef.h>
#include <cstdint>
#include <memory>
#include <sstream>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h"
namespace absl {
namespace container_internal {
namespace {
using ::absl::Span;
using ::testing::ElementsAre;
size_t Distance(const void* from, const void* to) {
ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
return static_cast<const char*>(to) - static_cast<const char*>(from);
}
template <class Expected, class Actual>
Expected Type(Actual val) {
static_assert(std::is_same<Expected, Actual>(), "");
return val;
}
using Int128 = int64_t[2];
// Properties of types that this test relies on.
static_assert(sizeof(int8_t) == 1, "");
static_assert(alignof(int8_t) == 1, "");
static_assert(sizeof(int16_t) == 2, "");
static_assert(alignof(int16_t) == 2, "");
static_assert(sizeof(int32_t) == 4, "");
static_assert(alignof(int32_t) == 4, "");
static_assert(sizeof(Int128) == 16, "");
static_assert(alignof(Int128) == 8, "");
template <class Expected, class Actual>
void SameType() {
static_assert(std::is_same<Expected, Actual>(), "");
}
TEST(Layout, ElementType) {
{
using L = Layout<int32_t>;
SameType<int32_t, L::ElementType<0>>();
SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
}
{
using L = Layout<int32_t, int32_t>;
SameType<int32_t, L::ElementType<0>>();
SameType<int32_t, L::ElementType<1>>();
SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
SameType<int32_t, decltype(L::Partial())::ElementType<1>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
SameType<int8_t, L::ElementType<0>>();
SameType<int32_t, L::ElementType<1>>();
SameType<Int128, L::ElementType<2>>();
SameType<int8_t, decltype(L::Partial())::ElementType<0>>();
SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>();
SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>();
SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>();
SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>();
}
}
TEST(Layout, ElementTypes) {
{
using L = Layout<int32_t>;
SameType<std::tuple<int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int32_t, int32_t>;
SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0))::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0, 0))::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0, 0, 0))::ElementTypes>();
}
}
TEST(Layout, OffsetByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(3).Offset<0>());
EXPECT_EQ(0, L(3).Offset<0>());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(3).Offset<0>());
EXPECT_EQ(12, L::Partial(3).Offset<1>());
EXPECT_EQ(0, L::Partial(3, 5).Offset<0>());
EXPECT_EQ(12, L::Partial(3, 5).Offset<1>());
EXPECT_EQ(0, L(3, 5).Offset<0>());
EXPECT_EQ(12, L(3, 5).Offset<1>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(0).Offset<0>());
EXPECT_EQ(0, L::Partial(0).Offset<1>());
EXPECT_EQ(0, L::Partial(1).Offset<0>());
EXPECT_EQ(4, L::Partial(1).Offset<1>());
EXPECT_EQ(0, L::Partial(5).Offset<0>());
EXPECT_EQ(8, L::Partial(5).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<0>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(1, 0).Offset<0>());
EXPECT_EQ(4, L::Partial(1, 0).Offset<1>());
EXPECT_EQ(8, L::Partial(1, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(5, 3).Offset<0>());
EXPECT_EQ(8, L::Partial(5, 3).Offset<1>());
EXPECT_EQ(24, L::Partial(5, 3).Offset<2>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>());
EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>());
EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>());
EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>());
EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>());
EXPECT_EQ(0, L(5, 3, 1).Offset<0>());
EXPECT_EQ(24, L(5, 3, 1).Offset<2>());
EXPECT_EQ(8, L(5, 3, 1).Offset<1>());
}
}
TEST(Layout, OffsetByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial().Offset<int32_t>());
EXPECT_EQ(0, L::Partial(3).Offset<int32_t>());
EXPECT_EQ(0, L(3).Offset<int32_t>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial().Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(1).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(5).Offset<int8_t>());
EXPECT_EQ(8, L::Partial(5).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>());
EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>());
EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>());
EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>());
EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>());
EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>());
EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>());
EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>());
EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>());
EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>());
}
}
TEST(Layout, Offsets) {
{
using L = Layout<int32_t>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));
EXPECT_THAT(L(3).Offsets(), ElementsAre(0));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));
EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));
EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4));
EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8));
EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0));
EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8));
EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0));
EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8));
EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
}
}
TEST(Layout, AllocSize) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).AllocSize());
EXPECT_EQ(12, L::Partial(3).AllocSize());
EXPECT_EQ(12, L(3).AllocSize());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(32, L::Partial(3, 5).AllocSize());
EXPECT_EQ(32, L(3, 5).AllocSize());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize());
EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize());
EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize());
EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize());
EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize());
EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize());
EXPECT_EQ(136, L(3, 5, 7).AllocSize());
}
}
TEST(Layout, SizeByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<0>());
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L(3).Size<0>());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<0>());
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
EXPECT_EQ(3, L(3, 5).Size<0>());
EXPECT_EQ(5, L(3, 5).Size<1>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>());
EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>());
EXPECT_EQ(3, L(3, 5, 7).Size<0>());
EXPECT_EQ(5, L(3, 5, 7).Size<1>());
EXPECT_EQ(7, L(3, 5, 7).Size<2>());
}
}
TEST(Layout, SizeByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<int32_t>());
EXPECT_EQ(3, L::Partial(3).Size<int32_t>());
EXPECT_EQ(3, L(3).Size<int32_t>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Size<int8_t>());
EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>());
EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>());
EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>());
EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>());
EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>());
EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>());
EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>());
EXPECT_EQ(7, L(3, 5, 7).Size<Int128>());
}
}
TEST(Layout, Sizes) {
{
using L = Layout<int32_t>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L(3).Sizes(), ElementsAre(3));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
}
}
TEST(Layout, PointerByIndex) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(12,
Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
}
}
TEST(Layout, PointerByType) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const Int128*>(
L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const Int128*>(
L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(
L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
TEST(Layout, MutablePointerByIndex) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
}
}
TEST(Layout, MutablePointerByType) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(
24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
TEST(Layout, Pointers) {
alignas(max_align_t) const unsigned char p[100] = {};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<const int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const L x(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
}
TEST(Layout, MutablePointers) {
alignas(max_align_t) unsigned char p[100];
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
{
const L x(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
}
TEST(Layout, SliceByIndexSize) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L(3).Slice<0>(p).size());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
}
}
TEST(Layout, SliceByTypeSize) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
TEST(Layout, MutableSliceByIndexSize) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L(3).Slice<0>(p).size());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
}
}
TEST(Layout, MutableSliceByTypeSize) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
TEST(Layout, SliceByIndexData) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p,
Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(12,
Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(
p,
Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
TEST(Layout, SliceByTypeData) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
L::Partial(0, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
L::Partial(1, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
L::Partial(5, 3, 1).Slice<Int128>(p))
.data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p,
Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
TEST(Layout, MutableSliceByIndexData) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24, Distance(
p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
TEST(Layout, MutableSliceByTypeData) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4, Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(
p,
Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
MATCHER_P(IsSameSlice, slice, "") {
return arg.size() == slice.size() && arg.data() == slice.data();
}
template <typename... M>
class TupleMatcher {
public:
explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {}
template <typename Tuple>
bool MatchAndExplain(const Tuple& p,
testing::MatchResultListener* /* listener */) const {
static_assert(std::tuple_size<Tuple>::value == sizeof...(M), "");
return MatchAndExplainImpl(
p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{});
}
// For the matcher concept. Left empty as we don't really need the diagnostics
// right now.
void DescribeTo(::std::ostream* os) const {}
void DescribeNegationTo(::std::ostream* os) const {}
private:
template <typename Tuple, size_t... Is>
bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const {
// Using std::min as a simple variadic "and".
return std::min(
{true, testing::SafeMatcherCast<
const typename std::tuple_element<Is, Tuple>::type&>(
std::get<Is>(matchers_))
.Matches(std::get<Is>(p))...});
}
std::tuple<M...> matchers_;
};
template <typename... M>
testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
return testing::MakePolymorphicMatcher(
TupleMatcher<M...>(std::move(matchers)...));
}
TEST(Layout, Slices) {
alignas(max_align_t) const unsigned char p[100] = {};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
}
{
const auto x = L::Partial(1);
EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)),
Tuple(IsSameSlice(x.Slice<0>(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_THAT(
(Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, MutableSlices) {
alignas(max_align_t) unsigned char p[100] = {};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
}
{
const auto x = L::Partial(1);
EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)),
Tuple(IsSameSlice(x.Slice<0>(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_THAT(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
EXPECT_THAT(
(Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, UnalignedTypes) {
constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4));
}
TEST(Layout, CustomAlignment) {
constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2);
alignas(max_align_t) unsigned char p[x.AllocSize()];
EXPECT_EQ(10, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8));
}
TEST(Layout, OverAligned) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
alignas(2 * M) unsigned char p[x.AllocSize()];
EXPECT_EQ(2 * M + 3, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
}
TEST(Layout, Alignment) {
static_assert(Layout<int8_t>::Alignment() == 1, "");
static_assert(Layout<int32_t>::Alignment() == 4, "");
static_assert(Layout<int64_t>::Alignment() == 8, "");
static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
static_assert(Layout<int8_t, int32_t, int64_t>::Alignment() == 8, "");
static_assert(Layout<int8_t, int64_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<int32_t, int8_t, int64_t>::Alignment() == 8, "");
static_assert(Layout<int32_t, int64_t, int8_t>::Alignment() == 8, "");
static_assert(Layout<int64_t, int8_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<int64_t, int32_t, int8_t>::Alignment() == 8, "");
}
TEST(Layout, ConstexprPartial) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
}
// [from, to)
struct Region {
size_t from;
size_t to;
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
#ifdef ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
#endif
}
template <size_t N>
void ExpectPoisoned(const unsigned char (&buf)[N],
std::initializer_list<Region> reg) {
size_t prev = 0;
for (const Region& r : reg) {
ExpectRegionPoisoned(buf + prev, r.from - prev, false);
ExpectRegionPoisoned(buf + r.from, r.to - r.from, true);
prev = r.to;
}
ExpectRegionPoisoned(buf + prev, N - prev, false);
}
TEST(Layout, PoisonPadding) {
using L = Layout<int8_t, int64_t, int32_t, Int128>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
constexpr auto x = L::Partial();
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {});
}
{
constexpr auto x = L::Partial(1);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = L::Partial(1, 2);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = L::Partial(1, 2, 3);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr auto x = L::Partial(1, 2, 3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr L x(1, 2, 3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
}
TEST(Layout, DebugString) {
const std::string int64_type =
#ifdef _MSC_VER
"__int64";
#else // _MSC_VER
std::is_same<int64_t, long long>::value ? "long long" : "long"; // NOLINT
#endif // _MSC_VER
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
EXPECT_EQ("@0<signed char>(1)", x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16<" +
int64_type + " [2]>(16)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16<" +
int64_type + " [2]>(16)[4]",
x.DebugString());
}
{
constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16<" +
int64_type + " [2]>(16)[4]",
x.DebugString());
}
}
TEST(Layout, CharTypes) {
constexpr Layout<int32_t> x(1);
alignas(max_align_t) char c[x.AllocSize()] = {};
alignas(max_align_t) unsigned char uc[x.AllocSize()] = {};
alignas(max_align_t) signed char sc[x.AllocSize()] = {};
alignas(max_align_t) const char cc[x.AllocSize()] = {};
alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {};
alignas(max_align_t) const signed char csc[x.AllocSize()] = {};
Type<int32_t*>(x.Pointer<0>(c));
Type<int32_t*>(x.Pointer<0>(uc));
Type<int32_t*>(x.Pointer<0>(sc));
Type<const int32_t*>(x.Pointer<0>(cc));
Type<const int32_t*>(x.Pointer<0>(cuc));
Type<const int32_t*>(x.Pointer<0>(csc));
Type<int32_t*>(x.Pointer<int32_t>(c));
Type<int32_t*>(x.Pointer<int32_t>(uc));
Type<int32_t*>(x.Pointer<int32_t>(sc));
Type<const int32_t*>(x.Pointer<int32_t>(cc));
Type<const int32_t*>(x.Pointer<int32_t>(cuc));
Type<const int32_t*>(x.Pointer<int32_t>(csc));
Type<std::tuple<int32_t*>>(x.Pointers(c));
Type<std::tuple<int32_t*>>(x.Pointers(uc));
Type<std::tuple<int32_t*>>(x.Pointers(sc));
Type<std::tuple<const int32_t*>>(x.Pointers(cc));
Type<std::tuple<const int32_t*>>(x.Pointers(cuc));
Type<std::tuple<const int32_t*>>(x.Pointers(csc));
Type<Span<int32_t>>(x.Slice<0>(c));
Type<Span<int32_t>>(x.Slice<0>(uc));
Type<Span<int32_t>>(x.Slice<0>(sc));
Type<Span<const int32_t>>(x.Slice<0>(cc));
Type<Span<const int32_t>>(x.Slice<0>(cuc));
Type<Span<const int32_t>>(x.Slice<0>(csc));
Type<std::tuple<Span<int32_t>>>(x.Slices(c));
Type<std::tuple<Span<int32_t>>>(x.Slices(uc));
Type<std::tuple<Span<int32_t>>>(x.Slices(sc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(cc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(csc));
}
TEST(Layout, ConstElementType) {
constexpr Layout<const int32_t> x(1);
alignas(int32_t) char c[x.AllocSize()] = {};
const char* cc = c;
const int32_t* p = reinterpret_cast<const int32_t*>(cc);
EXPECT_EQ(alignof(int32_t), x.Alignment());
EXPECT_EQ(0, x.Offset<0>());
EXPECT_EQ(0, x.Offset<const int32_t>());
EXPECT_THAT(x.Offsets(), ElementsAre(0));
EXPECT_EQ(1, x.Size<0>());
EXPECT_EQ(1, x.Size<const int32_t>());
EXPECT_THAT(x.Sizes(), ElementsAre(1));
EXPECT_EQ(sizeof(int32_t), x.AllocSize());
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
}
namespace example {
// Immutable move-only string with sizeof equal to sizeof(void*). The string
// size and the characters are kept in the same heap allocation.
class CompactString {
public:
CompactString(const char* s = "") { // NOLINT
const size_t size = strlen(s);
// size_t[1], followed by char[size + 1].
// This statement doesn't allocate memory.
const L layout(1, size + 1);
// AllocSize() tells us how much memory we need to allocate for all our
// data.
p_.reset(new unsigned char[layout.AllocSize()]);
// If running under ASAN, mark the padding bytes, if any, to catch memory
// errors.
layout.PoisonPadding(p_.get());
// Store the size in the allocation.
// Pointer<size_t>() is a synonym for Pointer<0>().
*layout.Pointer<size_t>(p_.get()) = size;
// Store the characters in the allocation.
memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
}
size_t size() const {
// Equivalent to reinterpret_cast<size_t&>(*p_).
return *L::Partial().Pointer<size_t>(p_.get());
}
const char* c_str() const {
// Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
// The argument in Partial(1) specifies that we have size_t[1] in front of
// the characters.
return L::Partial(1).Pointer<char>(p_.get());
}
private:
// Our heap allocation contains a size_t followed by an array of chars.
using L = Layout<size_t, char>;
std::unique_ptr<unsigned char[]> p_;
};
TEST(CompactString, Works) {
CompactString s = "hello";
EXPECT_EQ(5, s.size());
EXPECT_STREQ("hello", s.c_str());
}
} // namespace example
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Adapts a policy for nodes.
//
// The node policy should model:
//
// struct Policy {
// // Returns a new node allocated and constructed using the allocator, using
// // the specified arguments.
// template <class Alloc, class... Args>
// value_type* new_element(Alloc* alloc, Args&&... args) const;
//
// // Destroys and deallocates node using the allocator.
// template <class Alloc>
// void delete_element(Alloc* alloc, value_type* node) const;
// };
//
// It may also optionally define `value()` and `apply()`. For documentation on
// these, see hash_policy_traits.h.
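//
// For illustration only, a minimal conforming policy for heap-allocated ints
// (the name `IntPolicy` is hypothetical; compare the test policy in
// node_hash_policy_test.cc) might look like:
//
//   struct IntPolicy : node_hash_policy<int&, IntPolicy> {
//     using key_type = int;
//     using init_type = int;
//     template <class Alloc>
//     static int* new_element(Alloc* alloc, int value) {
//       return new int(value);
//     }
//     template <class Alloc>
//     static void delete_element(Alloc* alloc, int* elem) {
//       delete elem;
//     }
//   };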
#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
#include <cassert>
#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>
namespace absl {
namespace container_internal {
template <class Reference, class Policy>
struct node_hash_policy {
static_assert(std::is_lvalue_reference<Reference>::value, "");
using slot_type = typename std::remove_cv<
typename std::remove_reference<Reference>::type>::type*;
template <class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
*slot = Policy::new_element(alloc, std::forward<Args>(args)...);
}
template <class Alloc>
static void destroy(Alloc* alloc, slot_type* slot) {
Policy::delete_element(alloc, *slot);
}
template <class Alloc>
static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
*new_slot = *old_slot;
}
static size_t space_used(const slot_type* slot) {
if (slot == nullptr) return Policy::element_space_used(nullptr);
return Policy::element_space_used(*slot);
}
static Reference element(slot_type* slot) { return **slot; }
template <class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}
template <class... Ts, class P = Policy>
static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
return P::apply(std::forward<Ts>(ts)...);
}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/node_hash_policy.h"
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_policy_traits.h"
namespace absl {
namespace container_internal {
namespace {
using ::testing::Pointee;
struct Policy : node_hash_policy<int&, Policy> {
using key_type = int;
using init_type = int;
template <class Alloc>
static int* new_element(Alloc* alloc, int value) {
return new int(value);
}
template <class Alloc>
static void delete_element(Alloc* alloc, int* elem) {
delete elem;
}
};
using NodePolicy = hash_policy_traits<Policy>;
struct NodeTest : ::testing::Test {
std::allocator<int> alloc;
int n = 53;
int* a = &n;
};
TEST_F(NodeTest, ConstructDestroy) {
NodePolicy::construct(&alloc, &a, 42);
EXPECT_THAT(a, Pointee(42));
NodePolicy::destroy(&alloc, &a);
}
TEST_F(NodeTest, transfer) {
int s = 42;
int* b = &s;
NodePolicy::transfer(&alloc, &a, &b);
EXPECT_EQ(&s, a);
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
namespace absl {
namespace container_internal {
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
// P is Policy. It's passed as a template argument to support maps that have
// incomplete types as values, as in unordered_map<K, IncompleteType>.
// MappedReference<> may be a non-reference type.
template <class P>
using MappedReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::reference>())));
// MappedConstReference<> may be a non-reference type.
template <class P>
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())));
public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;
template <typename K>
using key_arg = typename raw_hash_map::raw_hash_set::template key_arg<K>;
static_assert(!std::is_reference<key_type>::value, "");
// TODO(alkis): remove this assertion and verify that reference mapped_type is
// supported.
static_assert(!std::is_reference<mapped_type>::value, "");
using iterator = typename raw_hash_map::raw_hash_set::iterator;
using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
raw_hash_map() {}
using raw_hash_map::raw_hash_set::raw_hash_set;
// The last two template parameters ensure that both arguments are rvalues
// (lvalue arguments are handled by the overloads below). This is necessary
// for supporting bitfield arguments.
//
// union { int n : 1; };
// flat_hash_map<int, int> m;
// m.insert_or_assign(n, n);
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
return insert_or_assign_impl(std::forward<K>(k), v);
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
return insert_or_assign_impl(k, std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
return insert_or_assign_impl(k, v);
}
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
return insert_or_assign(std::forward<K>(k), v).first;
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
return insert_or_assign(k, std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
return insert_or_assign(k, v).first;
}
template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0,
K* = nullptr>
std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0>
std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
template <class K = key_type, class... Args, K* = nullptr>
iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}
template <class K = key_type, class... Args>
iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
return try_emplace(k, std::forward<Args>(args)...).first;
}
template <class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key) {
auto it = this->find(key);
if (it == this->end()) std::abort();
return Policy::value(&*it);
}
template <class K = key_type, class P = Policy>
MappedConstReference<P> at(const key_arg<K>& key) const {
auto it = this->find(key);
if (it == this->end()) std::abort();
return Policy::value(&*it);
}
template <class K = key_type, class P = Policy, K* = nullptr>
MappedReference<P> operator[](key_arg<K>&& key) {
return Policy::value(&*try_emplace(std::forward<K>(key)).first);
}
template <class K = key_type, class P = Policy>
MappedReference<P> operator[](const key_arg<K>& key) {
return Policy::value(&*try_emplace(key).first);
}
private:
template <class K, class V>
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
else
Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
return {this->iterator_at(res.first), res.second};
}
template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
return {this->iterator_at(res.first), res.second};
}
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/raw_hash_set.h"
#include <cstddef>
#include "absl/base/config.h"
namespace absl {
namespace container_internal {
constexpr size_t Group::kWidth;
// Returns "random" seed.
inline size_t RandomSeed() {
#if ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
static std::atomic<size_t> counter;
size_t value = counter.fetch_add(1, std::memory_order_relaxed);
#endif // ABSL_HAVE_THREAD_LOCAL
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
// iterator find(const key_type& key);
// template <class K>
// iterator find(const K& key);
//
// size_type erase(const key_type& key);
// template <class K>
// size_type erase(const K& key);
//
// std::pair<iterator, iterator> equal_range(const key_type& key);
// template <class K>
// std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
//
// find() also supports passing the hash explicitly:
//
// iterator find(const key_type& key, size_t hash);
// template <class U>
// iterator find(const U& key, size_t hash);
//
// In addition the pointer to element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// The table stores elements inline in a slot array. In addition to the slot
// array the table maintains some control state per slot. The extra state is one
// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
// the hash of an occupied slot. The table is split into logical groups of
// slots, like so:
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// On lookup the hash is split into two parts:
// - H2: 7 bits (those stored in the control bytes)
// - H1: the rest of the bits
// The groups are probed using H1. For each group the slots are matched to H2 in
// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
// is low (8 or 16), in almost all cases a match in H2 is also a lookup hit.
//
// On insert, once the right group is found (as in lookup), its slots are
// filled in order.
//
// On erase a slot is cleared. In case the group did not have any empty slots
// before the erase, the erased slot is marked as deleted.
//
// Groups without empty slots (but maybe with deleted slots) extend the probe
// sequence. The probing algorithm is quadratic. Given N the number of groups,
// the probing function for the i'th probe is:
//
// P(0) = H1 % N
//
// P(i) = (P(i - 1) + i) % N
//
// This probing function guarantees that after N probes, all the groups of the
// table will be probed exactly once.
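//
// For example, with N = 4 groups and H1 % N = 1, the groups are probed in
// the order 1, 2, 0, 3: P(1) = (1 + 1) % 4 = 2, P(2) = (2 + 2) % 4 = 0 and
// P(3) = (0 + 3) % 4 = 3, visiting each group exactly once.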
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#ifndef SWISSTABLE_HAVE_SSE2
#ifdef __SSE2__
#define SWISSTABLE_HAVE_SSE2 1
#else
#define SWISSTABLE_HAVE_SSE2 0
#endif
#endif
#ifndef SWISSTABLE_HAVE_SSSE3
#ifdef __SSSE3__
#define SWISSTABLE_HAVE_SSSE3 1
#else
#define SWISSTABLE_HAVE_SSSE3 0
#endif
#endif
#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
#error "Bad configuration!"
#endif
#if SWISSTABLE_HAVE_SSE2
#include <x86intrin.h>
#endif
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
#include "absl/base/port.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/types/optional.h"
#include "absl/utility/utility.h"
namespace absl {
namespace container_internal {
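// Produces the sequence of offsets probed for a given hash: the first offset
// is hash & mask, and each call to next() advances by one additional Width
// (Width, then 2 * Width, and so on), modulo mask + 1. This implements the
// quadratic probing described in the file comment above.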
template <size_t Width>
class probe_seq {
public:
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
void next() {
index_ += Width;
offset_ += index_;
offset_ &= mask_;
}
// 0-based probe index. The i-th probe in the probe sequence.
size_t index() const { return index_; }
private:
size_t mask_;
size_t offset_;
size_t index_ = 0;
};
template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
template <class PassedKey, class... Args>
std::pair<
decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
std::declval<const PassedKey&>()))>*
operator()(const PassedKey&, const Args&...) const;
};
template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
absl::void_t<decltype(
Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
template <class, class = void>
struct IsTransparent : std::false_type {};
template <class T>
struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
: std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable() {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <typename T>
int TrailingZeros(T x) {
return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(x)
: base_internal::CountTrailingZerosNonZero32(x);
}
template <typename T>
int LeadingZeros(T x) {
return sizeof(T) == 8 ? base_internal::CountLeadingZeros64(x)
: base_internal::CountLeadingZeros32(x);
}
// An abstraction over a bitmask. It provides an easy way to iterate through
// the indexes of the set bits of a bitmask. When Shift=0 (platforms with
// SSE), this is a true bitmask. On non-SSE platforms, the arithmetic used to
// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
// either 0x00 or 0x80.
//
// For example:
// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask {
static_assert(std::is_unsigned<T>::value, "");
static_assert(Shift == 0 || Shift == 3, "");
public:
// These are useful for unit tests (gunit).
using value_type = int;
using iterator = BitMask;
using const_iterator = BitMask;
explicit BitMask(T mask) : mask_(mask) {}
BitMask& operator++() {
mask_ &= (mask_ - 1);
return *this;
}
explicit operator bool() const { return mask_ != 0; }
int operator*() const { return LowestBitSet(); }
int LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
int HighestBitSet() const {
return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
1) >>
Shift;
}
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
int TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
int LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
}
private:
friend bool operator==(const BitMask& a, const BitMask& b) {
return a.mask_ == b.mask_;
}
friend bool operator!=(const BitMask& a, const BitMask& b) {
return a.mask_ != b.mask_;
}
T mask_;
};
using ctrl_t = signed char;
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
// below for details.
enum Ctrl : ctrl_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
kSentinel = -1, // 0b11111111
};
static_assert(
kEmpty & kDeleted & kSentinel & 0x80,
"Special markers need to have the MSB to make checking for them efficient");
static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
"kEmpty and kDeleted must be smaller than kSentinel to make the "
"SIMD test of IsEmptyOrDeleted() efficient");
static_assert(kSentinel == -1,
"kSentinel must be -1 to elide loading it from memory into SIMD "
"registers (pcmpeqd xmm, xmm)");
static_assert(kEmpty == -128,
"kEmpty must be -128 to make the SIMD check for its "
"existence efficient (psignb xmm, xmm)");
static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
"kEmpty and kDeleted must share an unset bit that is not shared "
"by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
"efficient");
static_assert(kDeleted == -2,
"kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
// A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find().
inline ctrl_t* EmptyGroup() {
alignas(16) static constexpr ctrl_t empty_group[] = {
kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
return const_cast<ctrl_t*>(empty_group);
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
// Returns a hash seed.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t HashSeed(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
return (hash >> 7) ^ HashSeed(ctrl);
}
inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
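// For example, for hash = 0x1234 the control byte stores
// H2(0x1234) = 0x1234 & 0x7F = 0x34, and probing uses the remaining bits:
// H1(0x1234, ctrl) = (0x1234 >> 7) ^ HashSeed(ctrl) = 0x24 ^ HashSeed(ctrl).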
inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= 0; }
inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
#if SWISSTABLE_HAVE_SSE2
struct Group {
static constexpr size_t kWidth = 16; // the number of slots per group
explicit Group(const ctrl_t* pos) {
ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
}
// Returns a bitmask representing the positions of slots that match hash.
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(hash);
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
}
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
#if SWISSTABLE_HAVE_SSSE3
// This only works because kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
return Match(kEmpty);
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
auto special = _mm_set1_epi8(kSentinel);
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_cmpgt_epi8(special, ctrl)));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(kSentinel);
return TrailingZeros(_mm_movemask_epi8(_mm_cmpgt_epi8(special, ctrl)) + 1);
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(0x80);
auto x126 = _mm_set1_epi8(126);
#if SWISSTABLE_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
auto special_mask = _mm_cmpgt_epi8(zero, ctrl);
auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
}
__m128i ctrl;
};
#else
struct Group {
static constexpr size_t kWidth = 8;
explicit Group(const ctrl_t* pos) : ctrl(little_endian::Load64(pos)) {}
BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
// For the technique, see:
// http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
// (Determine if a word has a byte equal to n).
//
// Caveat: there are false positives but:
// - they only occur if there is a real match
// - they never occur on kEmpty, kDeleted, kSentinel
// - they will be handled gracefully by subsequent checks in code
//
// Example:
// v = 0x1716151413121110
// hash = 0x12
// retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t lsbs = 0x0101010101010101ULL;
auto x = ctrl ^ (lsbs * hash);
return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
}
BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
}
BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
}
uint32_t CountLeadingEmptyOrDeleted() const {
constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t lsbs = 0x0101010101010101ULL;
auto x = ctrl & msbs;
auto res = (~x + (x >> 7)) & ~lsbs;
little_endian::Store64(dst, res);
}
uint64_t ctrl;
};
#endif // SWISSTABLE_HAVE_SSE2
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
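// A valid capacity has the form 2^k - 1 (so capacity + 1 is a power of two)
// and is at least Group::kWidth - 1.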
inline bool IsValidCapacity(size_t n) {
return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
}
// PRECONDITION:
// IsValidCapacity(capacity)
// ctrl[capacity] == kSentinel
// ctrl[i] != kSentinel for all i < capacity
// Applies mapping for every byte in ctrl:
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
inline void ConvertDeletedToEmptyAndFullToDeleted(
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
ctrl[capacity] = kSentinel;
}
// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
// greater than or equal to Group::kWidth - 1.
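// For example, with Group::kWidth == 16: NormalizeCapacity(n) == 15 for all
// n <= 15, and NormalizeCapacity(20) == 31.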
inline size_t NormalizeCapacity(size_t n) {
constexpr size_t kMinCapacity = Group::kWidth - 1;
return n <= kMinCapacity
? kMinCapacity
: std::numeric_limits<size_t>::max() >> LeadingZeros(n);
}
// The node_handle concept from C++17.
// We specialize node_handle for sets and maps. node_handle_base holds the
// common API of both.
template <typename Policy, typename Alloc>
class node_handle_base {
protected:
using PolicyTraits = hash_policy_traits<Policy>;
using slot_type = typename PolicyTraits::slot_type;
public:
using allocator_type = Alloc;
constexpr node_handle_base() {}
node_handle_base(node_handle_base&& other) noexcept {
*this = std::move(other);
}
~node_handle_base() { destroy(); }
node_handle_base& operator=(node_handle_base&& other) {
destroy();
if (!other.empty()) {
alloc_ = other.alloc_;
PolicyTraits::transfer(alloc(), slot(), other.slot());
other.reset();
}
return *this;
}
bool empty() const noexcept { return !alloc_; }
explicit operator bool() const noexcept { return !empty(); }
allocator_type get_allocator() const { return *alloc_; }
protected:
template <typename, typename, typename, typename>
friend class raw_hash_set;
node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
PolicyTraits::transfer(alloc(), slot(), s);
}
void destroy() {
if (!empty()) {
PolicyTraits::destroy(alloc(), slot());
reset();
}
}
void reset() {
assert(alloc_.has_value());
alloc_ = absl::nullopt;
}
slot_type* slot() const {
assert(!empty());
return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
}
allocator_type* alloc() { return std::addressof(*alloc_); }
private:
absl::optional<allocator_type> alloc_;
mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
slot_space_;
};
// For sets.
template <typename Policy, typename Alloc, typename = void>
class node_handle : public node_handle_base<Policy, Alloc> {
using Base = typename node_handle::node_handle_base;
public:
using value_type = typename Base::PolicyTraits::value_type;
constexpr node_handle() {}
value_type& value() const {
return Base::PolicyTraits::element(this->slot());
}
private:
template <typename, typename, typename, typename>
friend class raw_hash_set;
node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
};
// For maps.
template <typename Policy, typename Alloc>
class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
: public node_handle_base<Policy, Alloc> {
using Base = typename node_handle::node_handle_base;
public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;
constexpr node_handle() {}
auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
return Base::PolicyTraits::key(this->slot());
}
mapped_type& mapped() const {
return Base::PolicyTraits::value(
&Base::PolicyTraits::element(this->slot()));
}
private:
template <typename, typename, typename, typename>
friend class raw_hash_set;
node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
};
// Implement the insert_return_type<> concept of C++17.
template <class Iterator, class NodeType>
struct insert_return_type {
Iterator position;
bool inserted;
NodeType node;
};
// Helper trait to allow or disallow arbitrary keys when the hash and
// eq functions are transparent.
// It is very important that the inner template is an alias and that the type it
// produces is not a dependent type. Otherwise, type deduction would fail.
template <bool is_transparent>
struct KeyArg {
// Transparent. Forward `K`.
template <typename K, typename key_type>
using type = K;
};
template <>
struct KeyArg<false> {
// Not transparent. Always use `key_type`.
template <typename K, typename key_type>
using type = key_type;
};
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
//
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
// functor should accept a key and return size_t as hash. For best performance
// it is important that the hash function provides high entropy across all bits
// of the hash.
//
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
// should accept two (of possibly different type) keys and return a bool: true
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
using PolicyTraits = hash_policy_traits<Policy>;
using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
IsTransparent<Hash>::value>;
public:
using init_type = typename PolicyTraits::init_type;
using key_type = typename PolicyTraits::key_type;
// TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
// code fixes!
using slot_type = typename PolicyTraits::slot_type;
using allocator_type = Alloc;
using size_type = size_t;
using difference_type = ptrdiff_t;
using hasher = Hash;
using key_equal = Eq;
using policy_type = Policy;
using value_type = typename PolicyTraits::value_type;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = typename absl::allocator_traits<
allocator_type>::template rebind_traits<value_type>::pointer;
using const_pointer = typename absl::allocator_traits<
allocator_type>::template rebind_traits<value_type>::const_pointer;
// Alias used for heterogeneous lookup functions.
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
// `key_type` otherwise. It permits template argument deduction on `K` for the
// transparent case.
template <class K>
using key_arg = typename KeyArgImpl::template type<K, key_type>;
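  //
  // For example (illustrative): with transparent hash and eq functors over
  // strings, find(absl::string_view("abc")) deduces K = absl::string_view and
  // avoids materializing a temporary std::string; with non-transparent
  // functors, key_arg<K> collapses to key_type and a conversion takes place.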
private:
// Give an early error when key_type is not hashable/eq.
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
static Layout MakeLayout(size_t capacity) {
assert(IsValidCapacity(capacity));
return Layout(capacity + Group::kWidth + 1, capacity);
}
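  // For example, with capacity == 15 and Group::kWidth == 16, MakeLayout
  // reserves 15 + 16 + 1 == 32 ctrl bytes (capacity bytes, one sentinel, and
  // a cloned group for wrap-around reads), followed by storage for 15 slots.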
using AllocTraits = absl::allocator_traits<allocator_type>;
using SlotAlloc = typename absl::allocator_traits<
allocator_type>::template rebind_alloc<slot_type>;
using SlotAllocTraits = typename absl::allocator_traits<
allocator_type>::template rebind_traits<slot_type>;
static_assert(std::is_lvalue_reference<reference>::value,
"Policy::element() must return a reference");
template <typename T>
struct SameAsElementReference
: std::is_same<typename std::remove_cv<
typename std::remove_reference<reference>::type>::type,
typename std::remove_cv<
typename std::remove_reference<T>::type>::type> {};
// An enabler for insert(T&&): T must be convertible to init_type or be the
// same as [cv] value_type [ref].
// Note: we separate SameAsElementReference into its own type to avoid using
// reference unless we need to. MSVC doesn't seem to like it in some
// cases.
template <class T>
using RequiresInsertable = typename std::enable_if<
absl::disjunction<std::is_convertible<T, init_type>,
SameAsElementReference<T>>::value,
int>::type;
// RequiresNotInit is a workaround for gcc prior to 7.1.
// See https://godbolt.org/g/Y4xsUh.
template <class T>
using RequiresNotInit =
typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
template <class... Ts>
using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
public:
static_assert(std::is_same<pointer, value_type*>::value,
"Allocators with custom pointer types are not supported");
static_assert(std::is_same<const_pointer, const value_type*>::value,
"Allocators with custom pointer types are not supported");
class iterator {
friend class raw_hash_set;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = typename raw_hash_set::value_type;
using reference =
absl::conditional_t<PolicyTraits::constant_iterators::value,
const value_type&, value_type&>;
using pointer = absl::remove_reference_t<reference>*;
using difference_type = typename raw_hash_set::difference_type;
iterator() {}
// PRECONDITION: not an end() iterator.
reference operator*() const { return PolicyTraits::element(slot_); }
// PRECONDITION: not an end() iterator.
pointer operator->() const { return &operator*(); }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
++ctrl_;
++slot_;
skip_empty_or_deleted();
return *this;
}
// PRECONDITION: not an end() iterator.
iterator operator++(int) {
auto tmp = *this;
++*this;
return tmp;
}
friend bool operator==(const iterator& a, const iterator& b) {
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
return !(a == b);
}
private:
iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
// ctrl is not necessarily aligned to Group::kWidth. It is also likely
// to read past the space for ctrl bytes and into slots. This is ok
// because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
// is no way to read outside the combined slot array.
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
}
ctrl_t* ctrl_ = nullptr;
slot_type* slot_;
};
class const_iterator {
friend class raw_hash_set;
public:
using iterator_category = typename iterator::iterator_category;
using value_type = typename raw_hash_set::value_type;
using reference = typename raw_hash_set::const_reference;
using pointer = typename raw_hash_set::const_pointer;
using difference_type = typename raw_hash_set::difference_type;
const_iterator() {}
// Implicit construction from iterator.
const_iterator(iterator i) : inner_(std::move(i)) {}
reference operator*() const { return *inner_; }
pointer operator->() const { return inner_.operator->(); }
const_iterator& operator++() {
++inner_;
return *this;
}
const_iterator operator++(int) { return inner_++; }
friend bool operator==(const const_iterator& a, const const_iterator& b) {
return a.inner_ == b.inner_;
}
friend bool operator!=(const const_iterator& a, const const_iterator& b) {
return !(a == b);
}
private:
const_iterator(const ctrl_t* ctrl, const slot_type* slot)
: inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
iterator inner_;
};
using node_type = container_internal::node_handle<Policy, Alloc>;
raw_hash_set() noexcept(
std::is_nothrow_default_constructible<hasher>::value&&
std::is_nothrow_default_constructible<key_equal>::value&&
std::is_nothrow_default_constructible<allocator_type>::value) {}
explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
initialize_slots();
}
}
raw_hash_set(size_t bucket_count, const hasher& hash,
const allocator_type& alloc)
: raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
raw_hash_set(size_t bucket_count, const allocator_type& alloc)
: raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
explicit raw_hash_set(const allocator_type& alloc)
: raw_hash_set(0, hasher(), key_equal(), alloc) {}
template <class InputIter>
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: raw_hash_set(bucket_count, hash, eq, alloc) {
insert(first, last);
}
template <class InputIter>
raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
const hasher& hash, const allocator_type& alloc)
: raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
template <class InputIter>
raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
const allocator_type& alloc)
: raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
template <class InputIter>
raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
: raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
// Instead of accepting std::initializer_list<value_type> as the first
// argument like std::unordered_set<value_type> does, we have two overloads
// that accept std::initializer_list<T> and std::initializer_list<init_type>.
// This is advantageous for performance.
//
// // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies
// // the strings into the set.
// std::unordered_set<std::string> s = {"abc", "def"};
//
// // Turns {"abc", "def"} into std::initializer_list<const char*>, then
// // copies the strings into the set.
// absl::flat_hash_set<std::string> s = {"abc", "def"};
//
// The same trick is used in insert().
//
// The enabler is necessary to prevent this constructor from triggering where
// the copy constructor is meant to be called.
//
// absl::flat_hash_set<int> a, b{a};
//
// RequiresNotInit<T> is a workaround for gcc prior to 7.1.
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
const hasher& hash, const allocator_type& alloc)
: raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
const hasher& hash, const allocator_type& alloc)
: raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
const allocator_type& alloc)
: raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
const allocator_type& alloc)
: raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
: raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
raw_hash_set(std::initializer_list<init_type> init,
const allocator_type& alloc)
: raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
raw_hash_set(const raw_hash_set& that)
: raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
that.alloc_ref())) {}
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
: raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
reserve(that.size());
// Because the table is guaranteed to be empty, we can do something faster
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
const size_t i = find_first_non_full(hash);
set_ctrl(i, H2(hash));
emplace_at(i, v);
}
size_ = that.size();
growth_left() -= that.size();
}
raw_hash_set(raw_hash_set&& that) noexcept(
std::is_nothrow_copy_constructible<hasher>::value&&
std::is_nothrow_copy_constructible<key_equal>::value&&
std::is_nothrow_copy_constructible<allocator_type>::value)
: ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
slots_(absl::exchange(that.slots_, nullptr)),
size_(absl::exchange(that.size_, 0)),
capacity_(absl::exchange(that.capacity_, 0)),
        // Hash, equality and allocator are copied instead of moved because
        // `that` must be left valid. If Hash is std::function, moving it
        // would leave behind a null functor that cannot be called.
settings_(that.settings_) {
// growth_left was copied above, reset the one from `that`.
that.growth_left() = 0;
}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),
slots_(nullptr),
size_(0),
capacity_(0),
settings_(0, that.hash_ref(), that.eq_ref(), a) {
if (a == that.alloc_ref()) {
std::swap(ctrl_, that.ctrl_);
std::swap(slots_, that.slots_);
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
} else {
reserve(that.size());
      // Note: this inserts `that`'s elements one by one (moving them where
      // possible, copying otherwise) instead of reusing `that`'s storage.
      // This can be fixed if it ever becomes an issue.
for (auto& elem : that) insert(std::move(elem));
}
}
raw_hash_set& operator=(const raw_hash_set& that) {
raw_hash_set tmp(that,
AllocTraits::propagate_on_container_copy_assignment::value
? that.alloc_ref()
: alloc_ref());
swap(tmp);
return *this;
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
absl::allocator_traits<allocator_type>::is_always_equal::value&&
std::is_nothrow_move_assignable<hasher>::value&&
std::is_nothrow_move_assignable<key_equal>::value) {
// TODO(sbenza): We should only use the operations from the noexcept clause
// to make sure we actually adhere to that contract.
return move_assign(
std::move(that),
typename AllocTraits::propagate_on_container_move_assignment());
}
~raw_hash_set() { destroy_slots(); }
iterator begin() {
auto it = iterator_at(0);
it.skip_empty_or_deleted();
return it;
}
iterator end() { return {ctrl_ + capacity_}; }
const_iterator begin() const {
return const_cast<raw_hash_set*>(this)->begin();
}
const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
bool empty() const { return !size(); }
size_t size() const { return size_; }
size_t capacity() const { return capacity_; }
size_t max_size() const { return std::numeric_limits<size_t>::max(); }
void clear() {
    // Iterating over this container is O(bucket_count()). When bucket_count()
    // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
    // container is small, because allocation is slow compared to destroying
    // the elements of the container. So we pick the largest bucket_count()
    // threshold for which iteration is still fast, and past that we simply
    // deallocate the array.
if (capacity_ > 127) {
destroy_slots();
} else if (capacity_) {
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
size_ = 0;
reset_ctrl();
growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
}
assert(empty());
}
// This overload kicks in when the argument is an rvalue of insertable and
// decomposable type other than init_type.
//
// flat_hash_map<std::string, int> m;
// m.insert(std::make_pair("abc", 42));
template <class T, RequiresInsertable<T> = 0,
typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
return emplace(std::forward<T>(value));
}
// This overload kicks in when the argument is a bitfield or an lvalue of
// insertable and decomposable type.
//
// union { int n : 1; };
// flat_hash_set<int> s;
// s.insert(n);
//
// flat_hash_set<std::string> s;
// const char* p = "hello";
// s.insert(p);
//
// TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
// RequiresInsertable<T> with RequiresInsertable<const T&>.
// We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
class T, RequiresInsertable<T> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
std::pair<iterator, bool> insert(const T& value) {
return emplace(value);
}
// This overload kicks in when the argument is an rvalue of init_type. Its
// purpose is to handle brace-init-list arguments.
//
// flat_hash_set<std::string, int> s;
// s.insert({"abc", 42});
std::pair<iterator, bool> insert(init_type&& value) {
return emplace(std::move(value));
}
template <class T, RequiresInsertable<T> = 0,
typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
T* = nullptr>
iterator insert(const_iterator, T&& value) {
return insert(std::forward<T>(value)).first;
}
// TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
// RequiresInsertable<T> with RequiresInsertable<const T&>.
// We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
class T, RequiresInsertable<T> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
iterator insert(const_iterator, const T& value) {
return insert(value).first;
}
iterator insert(const_iterator, init_type&& value) {
return insert(std::move(value)).first;
}
template <class InputIt>
void insert(InputIt first, InputIt last) {
for (; first != last; ++first) insert(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
void insert(std::initializer_list<T> ilist) {
insert(ilist.begin(), ilist.end());
}
void insert(std::initializer_list<init_type> ilist) {
insert(ilist.begin(), ilist.end());
}
insert_return_type<iterator, node_type> insert(node_type&& node) {
if (!node) return {end(), false, node_type()};
const auto& elem = PolicyTraits::element(node.slot());
auto res = PolicyTraits::apply(
InsertSlot<false>{*this, std::move(*node.slot())}, elem);
if (res.second) {
node.reset();
return {res.first, true, node_type()};
} else {
return {res.first, false, std::move(node)};
}
}
iterator insert(const_iterator, node_type&& node) {
return insert(std::move(node)).first;
}
// This overload kicks in if we can deduce the key from args. This enables us
// to avoid constructing value_type if an entry with the same key already
// exists.
//
// For example:
//
// flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
// // Creates no std::string copies and makes no heap allocations.
// m.emplace("abc", "xyz");
template <class... Args, typename std::enable_if<
IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
return PolicyTraits::apply(EmplaceDecomposable{*this},
std::forward<Args>(args)...);
}
// This overload kicks in if we cannot deduce the key from args. It constructs
// value_type unconditionally and then either moves it into the table or
// destroys.
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
const auto& elem = PolicyTraits::element(slot);
return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
}
template <class... Args>
iterator emplace_hint(const_iterator, Args&&... args) {
return emplace(std::forward<Args>(args)...).first;
}
// Extension API: support for lazy emplace.
//
  // Looks up key in the table. If found, returns the iterator to the element.
  // Otherwise calls `f` with one argument of type raw_hash_set::constructor.
  // `f` MUST invoke that constructor exactly once, with arguments as if
  // constructing a raw_hash_set::value_type; otherwise the behavior is
  // undefined.
//
// For example:
//
  //   std::unordered_set<ArenaString> s;
  //   // Makes ArenaString even if "abc" is in the set.
  //   s.insert(ArenaString(&arena, "abc"));
  //
  //   flat_hash_set<ArenaString> s;
  //   // Makes ArenaString only if "abc" is not in the set.
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
  //     ctor(&arena, "abc");
  //   });
//
// WARNING: This API is currently experimental. If there is a way to implement
// the same thing with the rest of the API, prefer that.
class constructor {
friend class raw_hash_set;
public:
template <class... Args>
void operator()(Args&&... args) const {
assert(*slot_);
PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
*slot_ = nullptr;
}
private:
constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
allocator_type* alloc_;
slot_type** slot_;
};
template <class K = key_type, class F>
iterator lazy_emplace(const key_arg<K>& key, F&& f) {
auto res = find_or_prepare_insert(key);
if (res.second) {
slot_type* slot = slots_ + res.first;
std::forward<F>(f)(constructor(&alloc_ref(), &slot));
assert(!slot);
}
return iterator_at(res.first);
}
// Extension API: support for heterogeneous keys.
//
// std::unordered_set<std::string> s;
// // Turns "abc" into std::string.
// s.erase("abc");
//
// flat_hash_set<std::string> s;
// // Uses "abc" directly without copying it into std::string.
// s.erase("abc");
template <class K = key_type>
size_type erase(const key_arg<K>& key) {
auto it = find(key);
if (it == end()) return 0;
erase(it);
return 1;
}
// Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
// this method returns void to reduce algorithmic complexity to O(1). In
// order to erase while iterating across a map, use the following idiom (which
// also works for standard containers):
//
// for (auto it = m.begin(), end = m.end(); it != end;) {
// if (<pred>) {
// m.erase(it++);
// } else {
// ++it;
// }
// }
void erase(const_iterator cit) { erase(cit.inner_); }
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
assert(it != end());
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
iterator erase(const_iterator first, const_iterator last) {
while (first != last) {
erase(first++);
}
return last.inner_;
}
// Moves elements from `src` into `this`.
// If the element already exists in `this`, it is left unmodified in `src`.
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
assert(this != &src);
for (auto it = src.begin(), e = src.end(); it != e; ++it) {
if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
PolicyTraits::element(it.slot_))
.second) {
src.erase_meta_only(it);
}
}
}
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
merge(src);
}
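  // merge() example (illustrative):
  //
  //   absl::flat_hash_set<std::string> dst = {"b"};
  //   absl::flat_hash_set<std::string> src = {"a", "b"};
  //   dst.merge(src);  // dst == {"a", "b"}; src == {"b"} (duplicate stays).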
node_type extract(const_iterator position) {
node_type node(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
return node;
}
template <
class K = key_type,
typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
node_type extract(const key_arg<K>& key) {
auto it = find(key);
return it == end() ? node_type() : extract(const_iterator{it});
}
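  // extract() example (illustrative): moves an element between tables without
  // copying it.
  //
  //   node_type node = src.extract("abc");
  //   if (node) dst.insert(std::move(node));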
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
(!AllocTraits::propagate_on_container_swap::value ||
IsNoThrowSwappable<allocator_type>())) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
swap(size_, that.size_);
swap(capacity_, that.capacity_);
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
if (AllocTraits::propagate_on_container_swap::value) {
swap(alloc_ref(), that.alloc_ref());
} else {
// If the allocators do not compare equal it is officially undefined
// behavior. We choose to do nothing.
}
}
void rehash(size_t n) {
if (n == 0 && capacity_ == 0) return;
if (n == 0 && size_ == 0) return destroy_slots();
auto m = NormalizeCapacity(std::max(
n, static_cast<size_t>(std::ceil(size() / kMaxLoadFactor))));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
}
}
void reserve(size_t n) {
rehash(static_cast<size_t>(std::ceil(n / kMaxLoadFactor)));
}
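  // For example (illustrative), calling t.reserve(n) on an empty table before
  // inserting n elements guarantees those insertions trigger no rehashing.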
// Extension API: support for heterogeneous keys.
//
// std::unordered_set<std::string> s;
// // Turns "abc" into std::string.
// s.count("abc");
//
  //   flat_hash_set<std::string> s;
// // Uses "abc" directly without copying it into std::string.
// s.count("abc");
template <class K = key_type>
size_t count(const key_arg<K>& key) const {
return find(key) == end() ? 0 : 1;
}
  // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key. Like all lookup functions, this supports heterogeneous keys.
//
// NOTE: This is a very low level operation and should not be used without
// specific benchmarks indicating its importance.
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
(void)key;
#if defined(__GNUC__)
auto seq = probe(hash_ref()(key));
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
}
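  // prefetch() example (illustrative): overlap the memory fetch with
  // unrelated work.
  //
  //   s.prefetch(key);
  //   ... unrelated work ...
  //   auto it = s.find(key);  // the probed cache lines are likely resident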
// The API of find() has two extensions.
//
// 1. The hash can be passed by the user. It must be equal to the hash of the
// key.
//
  // 2. The type of the key argument doesn't have to be key_type. This is the
  // so-called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
}
}
template <class K = key_type>
iterator find(const key_arg<K>& key) {
return find(key, hash_ref()(key));
}
template <class K = key_type>
const_iterator find(const key_arg<K>& key, size_t hash) const {
return const_cast<raw_hash_set*>(this)->find(key, hash);
}
template <class K = key_type>
const_iterator find(const key_arg<K>& key) const {
return find(key, hash_ref()(key));
}
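  // For example (illustrative), a caller can hash once and probe several
  // tables that share the same hasher:
  //
  //   const size_t h = t1.hash_function()(key);
  //   auto it1 = t1.find(key, h);
  //   auto it2 = t2.find(key, h);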
template <class K = key_type>
bool contains(const key_arg<K>& key) const {
return find(key) != end();
}
template <class K = key_type>
std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
template <class K = key_type>
std::pair<const_iterator, const_iterator> equal_range(
const key_arg<K>& key) const {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
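  // Since keys are unique in this container, the ranges returned by
  // equal_range() contain at most one element.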
size_t bucket_count() const { return capacity_; }
float load_factor() const {
return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
}
float max_load_factor() const { return 1.0f; }
void max_load_factor(float) {
// Does nothing.
}
hasher hash_function() const { return hash_ref(); }
key_equal key_eq() const { return eq_ref(); }
allocator_type get_allocator() const { return alloc_ref(); }
friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
if (a.size() != b.size()) return false;
const raw_hash_set* outer = &a;
const raw_hash_set* inner = &b;
if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
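    // Iterate over the table with the smaller capacity: iteration cost is
    // O(capacity), so this bounds the scan by the cheaper of the two tables.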
for (const value_type& elem : *outer)
if (!inner->has_element(elem)) return false;
return true;
}
friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
return !(a == b);
}
friend void swap(raw_hash_set& a,
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
}
private:
template <class Container, typename Enabler>
friend struct absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess;
struct FindElement {
template <class K, class... Args>
const_iterator operator()(const K& key, Args&&...) const {
return s.find(key);
}
const raw_hash_set& s;
};
struct HashElement {
template <class K, class... Args>
size_t operator()(const K& key, Args&&...) const {
return h(key);
}
const hasher& h;
};
template <class K1>
struct EqualElement {
template <class K2, class... Args>
bool operator()(const K2& lhs, Args&&...) const {
return eq(lhs, rhs);
}
const K1& rhs;
const key_equal& eq;
};
struct EmplaceDecomposable {
template <class K, class... Args>
std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
s.emplace_at(res.first, std::forward<Args>(args)...);
}
return {s.iterator_at(res.first), res.second};
}
raw_hash_set& s;
};
template <bool do_destroy>
struct InsertSlot {
template <class K, class... Args>
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
} else if (do_destroy) {
PolicyTraits::destroy(&s.alloc_ref(), &slot);
}
return {s.iterator_at(res.first), res.second};
}
raw_hash_set& s;
// Constructed slot. Either moved into place or destroyed.
slot_type&& slot;
};
// "erases" the object from the container, except that it doesn't actually
// destroy the object. It only updates all the metadata of the class.
// This can be used in conjunction with Policy::transfer to move the object to
// another place.
void erase_meta_only(const_iterator it) {
assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
--size_;
const size_t index = it.inner_.ctrl_ - ctrl_;
const size_t index_before = (index - Group::kWidth) & capacity_;
const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
    // We count how many consecutive non-empty slots we have to the right and
    // to the left of `it`. If the sum is >= kWidth, then there is at least one
    // probe window that might have seen a full group, so the slot must be
    // marked kDeleted to keep probe sequences scanning past it; otherwise it
    // is safe to mark the slot kEmpty.
bool was_never_full =
empty_before && empty_after &&
static_cast<size_t>(empty_after.TrailingZeros() +
empty_before.LeadingZeros()) < Group::kWidth;
set_ctrl(index, was_never_full ? kEmpty : kDeleted);
growth_left() += was_never_full;
}
void initialize_slots() {
assert(capacity_);
auto layout = MakeLayout(capacity_);
char* mem = static_cast<char*>(
Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
slots_ = layout.template Pointer<1>(mem);
reset_ctrl();
growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
}
void destroy_slots() {
if (!capacity_) return;
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
auto layout = MakeLayout(capacity_);
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
ctrl_ = EmptyGroup();
slots_ = nullptr;
size_ = 0;
capacity_ = 0;
growth_left() = 0;
}
void resize(size_t new_capacity) {
assert(IsValidCapacity(new_capacity));
auto* old_ctrl = ctrl_;
auto* old_slots = slots_;
const size_t old_capacity = capacity_;
capacity_ = new_capacity;
initialize_slots();
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
size_t new_i = find_first_non_full(hash);
set_ctrl(new_i, H2(hash));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
}
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
auto layout = MakeLayout(old_capacity);
Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
layout.AllocSize());
}
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
// - for each slot marked as DELETED
// hash = Hash(element)
// target = find_first_non_full(hash)
// if target is in the same group
// mark slot as FULL
// else if target is EMPTY
// transfer element to target
// mark slot as EMPTY
// mark target as FULL
// else if target is DELETED
// swap current element with target element
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i));
size_t new_i = find_first_non_full(hash);
      // Check whether the old and new positions fall within the same probe
      // group for this hash. If they do, we don't need to move the object:
      // it already occupies the best probe window it can reach.
const auto probe_index = [&](size_t pos) {
return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
};
// Element doesn't move.
if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
set_ctrl(i, H2(hash));
continue;
}
if (IsEmpty(ctrl_[new_i])) {
// Transfer element to the empty spot.
// set_ctrl poisons/unpoisons the slots so we have to call it at the
// right time.
set_ctrl(new_i, H2(hash));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
set_ctrl(i, kEmpty);
} else {
assert(IsDeleted(ctrl_[new_i]));
set_ctrl(new_i, H2(hash));
// Until we are done rehashing, DELETED marks previously FULL slots.
// Swap i and new_i elements.
PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
--i; // repeat
}
}
growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
}
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(Group::kWidth - 1);
} else if (size() <= kMaxLoadFactor / 2 * capacity_) {
// Squash DELETED without growing if there is enough capacity.
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
resize(capacity_ * 2 + 1);
}
}
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
elem))
return true;
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
seq.next();
assert(seq.index() < capacity_ && "full table!");
}
return false;
}
  // Probes the raw_hash_set with the probe sequence for `hash` and returns the
  // index of the first empty or deleted slot.
  // NOTE: this function must work with tables having both kEmpty and kDeleted
  // in one group. Such tables appear during drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
size_t find_first_non_full(size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to force small tables to have random entries too, so
// in debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
if (ShouldInsertBackwards(hash, ctrl_))
return seq.offset(mask.HighestBitSet());
else
return seq.offset(mask.LowestBitSet());
#else
return seq.offset(mask.LowestBitSet());
#endif
}
assert(seq.index() < capacity_ && "full table!");
seq.next();
}
}
// TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that));
swap(tmp);
return *this;
}
raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
raw_hash_set tmp(std::move(that), alloc_ref());
swap(tmp);
return *this;
}
protected:
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
auto hash = hash_ref()(key);
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return {seq.offset(i), false};
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
}
return {prepare_insert(hash), true};
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
size_t target = find_first_non_full(hash);
if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
rehash_and_grow_if_necessary();
target = find_first_non_full(hash);
}
++size_;
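    // Only consume growth budget when filling a previously kEmpty slot;
    // reusing a kDeleted slot does not bring the table closer to a rehash.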
growth_left() -= IsEmpty(ctrl_[target]);
set_ctrl(target, H2(hash));
return target;
}
// Constructs the value in the space pointed by the iterator. This only works
// after an unsuccessful find_or_prepare_insert() and before any other
// modifications happen in the raw_hash_set.
//
// PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
// k is the key decomposed from `forward<Args>(args)...`, and the bool
// returned by find_or_prepare_insert(k) was true.
// POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
template <class... Args>
void emplace_at(size_t i, Args&&... args) {
PolicyTraits::construct(&alloc_ref(), slots_ + i,
std::forward<Args>(args)...);
assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
iterator_at(i) &&
"constructed value does not match the lookup key");
}
iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
private:
friend struct RawHashSetTestOnlyAccess;
probe_seq<Group::kWidth> probe(size_t hash) const {
return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
}
// Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl() {
std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
ctrl_[capacity_] = kSentinel;
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
}
  // Sets the control byte, and if `i < Group::kWidth`, sets the cloned byte at
  // the end too.
void set_ctrl(size_t i, ctrl_t h) {
assert(i < capacity_);
if (IsFull(h)) {
SanitizerUnpoisonObject(slots_ + i);
} else {
SanitizerPoisonObject(slots_ + i);
}
ctrl_[i] = h;
ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
}
size_t& growth_left() { return settings_.template get<0>(); }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
const key_equal& eq_ref() const { return settings_.template get<2>(); }
allocator_type& alloc_ref() { return settings_.template get<3>(); }
const allocator_type& alloc_ref() const {
return settings_.template get<3>();
}
  // On average each group has 2 empty slots (for the vectorized case).
static constexpr float kMaxLoadFactor = 14.0 / 16.0;
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
key_equal, allocator_type>
settings_{0, hasher{}, key_equal{}, allocator_type{}};
};
namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
using Traits = typename Set::PolicyTraits;
using Slot = typename Traits::slot_type;
static size_t GetNumProbes(const Set& set,
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
auto seq = set.probe(hash);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
Traits::element(set.slots_ + seq.offset(i))))
return num_probes;
++num_probes;
}
if (g.MatchEmpty()) return num_probes;
seq.next();
++num_probes;
}
}
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity_;
if (capacity == 0) return 0;
auto layout = Set::MakeLayout(capacity);
size_t m = layout.AllocSize();
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * c.size();
} else {
for (size_t i = 0; i != capacity; ++i) {
if (container_internal::IsFull(c.ctrl_[i])) {
m += Traits::space_used(c.slots_ + i);
}
}
}
return m;
}
static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = container_internal::NormalizeCapacity(
std::ceil(size / Set::kMaxLoadFactor));
if (capacity == 0) return 0;
auto layout = Set::MakeLayout(capacity);
size_t m = layout.AllocSize();
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * size;
}
return m;
}
};
} // namespace hashtable_debug_internal
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <scoped_allocator>
#include <set>
#include "gtest/gtest.h"
#include "absl/container/internal/raw_hash_set.h"
#include "absl/container/internal/tracked.h"
namespace absl {
namespace container_internal {
namespace {
enum AllocSpec {
kPropagateOnCopy = 1,
kPropagateOnMove = 2,
kPropagateOnSwap = 4,
};
struct AllocState {
size_t num_allocs = 0;
std::set<void*> owned;
};
template <class T,
int Spec = kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>
class CheckedAlloc {
public:
template <class, int>
friend class CheckedAlloc;
using value_type = T;
CheckedAlloc() {}
explicit CheckedAlloc(size_t id) : id_(id) {}
CheckedAlloc(const CheckedAlloc&) = default;
CheckedAlloc& operator=(const CheckedAlloc&) = default;
template <class U>
CheckedAlloc(const CheckedAlloc<U, Spec>& that)
: id_(that.id_), state_(that.state_) {}
template <class U>
struct rebind {
using other = CheckedAlloc<U, Spec>;
};
using propagate_on_container_copy_assignment =
std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>;
using propagate_on_container_move_assignment =
std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>;
using propagate_on_container_swap =
std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>;
CheckedAlloc select_on_container_copy_construction() const {
if (Spec & kPropagateOnCopy) return *this;
return {};
}
T* allocate(size_t n) {
T* ptr = std::allocator<T>().allocate(n);
track_alloc(ptr);
return ptr;
}
void deallocate(T* ptr, size_t n) {
memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned.
track_dealloc(ptr);
return std::allocator<T>().deallocate(ptr, n);
}
friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) {
return !(a == b);
}
size_t num_allocs() const { return state_->num_allocs; }
void swap(CheckedAlloc& that) {
using std::swap;
swap(id_, that.id_);
swap(state_, that.state_);
}
friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); }
friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) {
return o << "alloc(" << a.id_ << ")";
}
private:
void track_alloc(void* ptr) {
AllocState* state = state_.get();
++state->num_allocs;
if (!state->owned.insert(ptr).second)
ADD_FAILURE() << *this << " got previously allocated memory: " << ptr;
}
void track_dealloc(void* ptr) {
if (state_->owned.erase(ptr) != 1)
ADD_FAILURE() << *this
<< " deleting memory owned by another allocator: " << ptr;
}
size_t id_ = std::numeric_limits<size_t>::max();
std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>();
};
struct Identity {
int32_t operator()(int32_t v) const { return v; }
};
struct Policy {
using slot_type = Tracked<int32_t>;
using init_type = Tracked<int32_t>;
using key_type = int32_t;
template <class allocator_type, class... Args>
static void construct(allocator_type* alloc, slot_type* slot,
Args&&... args) {
std::allocator_traits<allocator_type>::construct(
*alloc, slot, std::forward<Args>(args)...);
}
template <class allocator_type>
static void destroy(allocator_type* alloc, slot_type* slot) {
std::allocator_traits<allocator_type>::destroy(*alloc, slot);
}
template <class allocator_type>
static void transfer(allocator_type* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}
template <class F>
static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) {
return std::forward<F>(f)(v, v);
}
template <class F>
static auto apply(F&& f, const slot_type& v)
-> decltype(std::forward<F>(f)(v.val(), v)) {
return std::forward<F>(f)(v.val(), v);
}
template <class F>
static auto apply(F&& f, slot_type&& v)
-> decltype(std::forward<F>(f)(v.val(), std::move(v))) {
return std::forward<F>(f)(v.val(), std::move(v));
}
static slot_type& element(slot_type* slot) { return *slot; }
};
template <int Spec>
struct PropagateTest : public ::testing::Test {
using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>;
PropagateTest() {
EXPECT_EQ(a1, t1.get_allocator());
EXPECT_NE(a2, t1.get_allocator());
}
Alloc a1 = Alloc(1);
Table t1 = Table(0, a1);
Alloc a2 = Alloc(2);
};
using PropagateOnAll =
PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>;
using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>;
using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>;
TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); }
TEST_F(PropagateOnAll, InsertAllocates) {
auto it = t1.insert(0).first;
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, InsertDecomposes) {
auto it = t1.insert(0).first;
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
EXPECT_FALSE(t1.insert(0).second);
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, RehashMoves) {
auto it = t1.insert(0).first;
EXPECT_EQ(0, it->num_moves());
t1.rehash(2 * t1.capacity());
EXPECT_EQ(2, a1.num_allocs());
it = t1.find(0);
EXPECT_EQ(1, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, CopyConstructor) {
auto it = t1.insert(0).first;
Table u(t1);
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(NoPropagateOnCopy, CopyConstructor) {
auto it = t1.insert(0).first;
Table u(t1);
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, u.get_allocator().num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(t1, a1);
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(t1, a1);
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(t1, a2);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(t1, a2);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, MoveConstructor) {
auto it = t1.insert(0).first;
Table u(std::move(t1));
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveConstructor) {
auto it = t1.insert(0).first;
Table u(std::move(t1));
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(std::move(t1), a1);
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(std::move(t1), a1);
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(std::move(t1), a2);
it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(1, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(std::move(t1), a2);
it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(1, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = t1;
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = t1;
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(0, a2);
u = t1;
EXPECT_EQ(a1, u.get_allocator());
EXPECT_EQ(2, a1.num_allocs());
EXPECT_EQ(0, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(0, a2);
u = t1;
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = std::move(t1);
EXPECT_EQ(a1, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = std::move(t1);
EXPECT_EQ(a1, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(0, a2);
u = std::move(t1);
EXPECT_EQ(a1, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
auto it = t1.insert(0).first;
Table u(0, a2);
u = std::move(t1);
it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(1, a2.num_allocs());
EXPECT_EQ(1, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, Swap) {
auto it = t1.insert(0).first;
Table u(0, a2);
u.swap(t1);
EXPECT_EQ(a1, u.get_allocator());
EXPECT_EQ(a2, t1.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
EXPECT_EQ(0, a2.num_allocs());
EXPECT_EQ(0, it->num_moves());
EXPECT_EQ(0, it->num_copies());
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/internal/raw_hash_set.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/strings/string_view.h"
namespace absl {
namespace container_internal {
struct RawHashSetTestOnlyAccess {
template <typename C>
static auto GetSlots(const C& c) -> decltype(c.slots_) {
return c.slots_;
}
};
namespace {
using ::testing::DoubleNear;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(Util, NormalizeCapacity) {
constexpr size_t kMinCapacity = Group::kWidth - 1;
EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
}
TEST(Util, probe_seq) {
probe_seq<16> seq(0, 127);
auto gen = [&]() {
size_t res = seq.offset();
seq.next();
return res;
};
std::vector<size_t> offsets(8);
std::generate_n(offsets.begin(), 8, gen);
EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
seq = probe_seq<16>(128, 127);
std::generate_n(offsets.begin(), 8, gen);
EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
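  // Note: the offsets above follow triangular probing: each step advances by
  // one additional Group width (16, 32, 48, ...) modulo capacity + 1, which
  // visits every group exactly once when capacity + 1 is a power of two.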
}
TEST(BitMask, Smoke) {
EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}
TEST(BitMask, WithShift) {
// See the non-SSE version of Group for details on what this math is for.
uint64_t ctrl = 0x1716151413121110;
uint64_t hash = 0x12;
constexpr uint64_t msbs = 0x8080808080808080ULL;
constexpr uint64_t lsbs = 0x0101010101010101ULL;
auto x = ctrl ^ (lsbs * hash);
uint64_t mask = (x - lsbs) & ~x & msbs;
EXPECT_EQ(0x0000000080800000, mask);
BitMask<uint64_t, 8, 3> b(mask);
EXPECT_EQ(*b, 2);
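  // The mask flags byte 2 (a true match: ctrl byte 0x12 == hash) and byte 3
  // (a benign false positive from the borrow out of byte 2 in `x - lsbs`);
  // callers re-check candidates, and *b == 2 reports the first one (bit 23
  // shifted right by 3 to convert a bit index into a byte index).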
}
TEST(BitMask, LeadingTrailing) {
EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3);
EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6);
EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15);
EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0);
EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0);
EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}
TEST(Group, EmptyGroup) {
for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
}
#if SWISSTABLE_HAVE_SSE2
TEST(Group, Match) {
ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
7, 5, 3, 1, 1, 1, 1, 1};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
}
TEST(Group, MatchEmpty) {
ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
7, 5, 3, 1, 1, 1, 1, 1};
EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
}
TEST(Group, MatchEmptyOrDeleted) {
ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
7, 5, 3, 1, 1, 1, 1, 1};
EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
}
#else
TEST(Group, Match) {
ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
}
TEST(Group, MatchEmpty) {
ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
}
TEST(Group, MatchEmptyOrDeleted) {
ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
}
#endif
TEST(Batch, DropDeletes) {
constexpr size_t kCapacity = 63;
constexpr size_t kGroupWidth = container_internal::Group::kWidth;
std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
ctrl[kCapacity] = kSentinel;
std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
for (size_t i = 0; i != kCapacity; ++i) {
ctrl[i] = pattern[i % pattern.size()];
if (i < kGroupWidth - 1)
ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
ASSERT_EQ(ctrl[kCapacity], kSentinel);
for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
if (i == kCapacity) expected = kSentinel;
if (expected == kDeleted) expected = kEmpty;
if (IsFull(expected)) expected = kDeleted;
EXPECT_EQ(ctrl[i], expected)
<< i << " " << int{pattern[i % pattern.size()]};
}
}
TEST(Group, CountLeadingEmptyOrDeleted) {
const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
for (ctrl_t empty : empty_examples) {
std::vector<ctrl_t> e(Group::kWidth, empty);
EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
for (ctrl_t full : full_examples) {
for (size_t i = 0; i != Group::kWidth; ++i) {
std::vector<ctrl_t> f(Group::kWidth, empty);
f[i] = full;
EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
}
std::vector<ctrl_t> f(Group::kWidth, empty);
f[Group::kWidth * 2 / 3] = full;
f[Group::kWidth / 2] = full;
EXPECT_EQ(
Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
}
}
}
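// IntPolicy and StringPolicy below model the hash table policy interface
// used by raw_hash_set: the slot_type/key_type/init_type aliases plus
// static construct(), destroy(), transfer(), element(), and apply()
// functions. IntPolicy is the minimal case where slot, key, and init type
// are all int64_t; StringPolicy stores a std::pair in its slot and
// decomposes heterogeneous arguments via PairArgs.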
struct IntPolicy {
using slot_type = int64_t;
using key_type = int64_t;
using init_type = int64_t;
static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
static void destroy(void*, int64_t*) {}
static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
*new_slot = *old_slot;
}
static int64_t& element(slot_type* slot) { return *slot; }
template <class F>
static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
return std::forward<F>(f)(x, x);
}
};
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
std::is_convertible<const K&, absl::string_view>::value>::type>
decltype(std::declval<F>()(
std::declval<const absl::string_view&>(), std::piecewise_construct,
std::declval<std::tuple<K>>(),
std::declval<V>())) static apply_impl(F&& f,
std::pair<std::tuple<K>, V> p) {
const absl::string_view& key = std::get<0>(p.first);
return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
std::move(p.second));
}
public:
struct slot_type {
struct ctor {};
template <class... Ts>
slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
std::pair<std::string, std::string> pair;
};
using key_type = std::string;
using init_type = std::pair<std::string, std::string>;
template <class allocator_type, class... Args>
static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
std::allocator_traits<allocator_type>::construct(
*alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
}
template <class allocator_type>
static void destroy(allocator_type* alloc, slot_type* slot) {
std::allocator_traits<allocator_type>::destroy(*alloc, slot);
}
template <class allocator_type>
static void transfer(allocator_type* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(old_slot->pair));
destroy(alloc, old_slot);
}
static std::pair<std::string, std::string>& element(slot_type* slot) {
return slot->pair;
}
template <class F, class... Args>
static auto apply(F&& f, Args&&... args)
-> decltype(apply_impl(std::forward<F>(f),
PairArgs(std::forward<Args>(args)...))) {
return apply_impl(std::forward<F>(f),
PairArgs(std::forward<Args>(args)...));
}
};
struct StringHash : absl::Hash<absl::string_view> {
using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
using is_transparent = void;
};
struct StringTable
: raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
using Base = typename StringTable::raw_hash_set;
StringTable() {}
using Base::Base;
};
struct IntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, std::allocator<int64_t>> {
using Base = typename IntTable::raw_hash_set;
IntTable() {}
using Base::Base;
};
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
return 0;
}
};
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
BadTable() {}
using Base::Base;
};
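// With stateless hasher, key_equal, and allocator the whole table should be
// exactly the five words of MockTable below -- presumably the empty
// functors take no storage (empty-base optimization) -- while a stateful
// hasher adds exactly its own size.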
TEST(Table, EmptyFunctorOptimization) {
static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
static_assert(std::is_empty<std::allocator<int>>::value, "");
struct MockTable {
void* ctrl;
void* slots;
size_t size;
size_t capacity;
size_t growth_left;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
};
struct StatefulHash : StatelessHash {
size_t dummy;
};
EXPECT_EQ(
sizeof(MockTable),
sizeof(
raw_hash_set<StringPolicy, StatelessHash,
std::equal_to<absl::string_view>, std::allocator<int>>));
EXPECT_EQ(
sizeof(MockTable) + sizeof(StatefulHash),
sizeof(
raw_hash_set<StringPolicy, StatefulHash,
std::equal_to<absl::string_view>, std::allocator<int>>));
}
TEST(Table, Empty) {
IntTable t;
EXPECT_EQ(0, t.size());
EXPECT_TRUE(t.empty());
}
#ifdef __GNUC__
// Forces the compiler to materialize `v` (the empty inline asm claims to
// read it and clobbers memory), so that the timed find() loops below are
// not optimized away.
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
asm volatile("" : : "r,m"(v) : "memory");
}
#endif
TEST(Table, Prefetch) {
IntTable t;
t.emplace(1);
// Works for both present and absent keys.
t.prefetch(1);
t.prefetch(2);
// Do not run in debug mode, when prefetch is not implemented, or when
// sanitizers are enabled.
#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
!defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \
!defined(UNDEFINED_BEHAVIOR_SANITIZER)
const auto now = [] { return absl::base_internal::CycleClock::Now(); };
static constexpr int size = 1000000;
for (int i = 0; i < size; ++i) t.insert(i);
int64_t no_prefetch = 0, prefetch = 0;
for (int iter = 0; iter < 10; ++iter) {
int64_t time = now();
for (int i = 0; i < size; ++i) {
DoNotOptimize(t.find(i));
}
no_prefetch += now() - time;
time = now();
for (int i = 0; i < size; ++i) {
t.prefetch(i + 20);
DoNotOptimize(t.find(i));
}
prefetch += now() - time;
}
// no_prefetch is at least 30% slower.
EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
#endif
}
TEST(Table, LookupEmpty) {
IntTable t;
auto it = t.find(0);
EXPECT_TRUE(it == t.end());
}
TEST(Table, Insert1) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_THAT(*res.first, 0);
EXPECT_EQ(1, t.size());
EXPECT_THAT(*t.find(0), 0);
}
TEST(Table, Insert2) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_THAT(*res.first, 0);
EXPECT_EQ(1, t.size());
EXPECT_TRUE(t.find(1) == t.end());
res = t.emplace(1);
EXPECT_TRUE(res.second);
EXPECT_THAT(*res.first, 1);
EXPECT_EQ(2, t.size());
EXPECT_THAT(*t.find(0), 0);
EXPECT_THAT(*t.find(1), 1);
}
TEST(Table, InsertCollision) {
BadTable t;
EXPECT_TRUE(t.find(1) == t.end());
auto res = t.emplace(1);
EXPECT_TRUE(res.second);
EXPECT_THAT(*res.first, 1);
EXPECT_EQ(1, t.size());
EXPECT_TRUE(t.find(2) == t.end());
res = t.emplace(2);
EXPECT_THAT(*res.first, 2);
EXPECT_TRUE(res.second);
EXPECT_EQ(2, t.size());
EXPECT_THAT(*t.find(1), 1);
EXPECT_THAT(*t.find(2), 2);
}
// Test that we do not re-add an existing element when the search has to
// pass through many groups containing deleted slots.
TEST(Table, InsertCollisionAndFindAfterDelete) {
BadTable t; // all elements go to the same group.
// Have at least 2 groups with Group::kWidth collisions
// plus some extra collisions in the last group.
constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
for (size_t i = 0; i < kNumInserts; ++i) {
auto res = t.emplace(i);
EXPECT_TRUE(res.second);
EXPECT_THAT(*res.first, i);
EXPECT_EQ(i + 1, t.size());
}
// Remove elements one by one and check
// that we can still find all the other elements.
for (size_t i = 0; i < kNumInserts; ++i) {
EXPECT_EQ(1, t.erase(i)) << i;
for (size_t j = i + 1; j < kNumInserts; ++j) {
EXPECT_THAT(*t.find(j), j);
auto res = t.emplace(j);
EXPECT_FALSE(res.second) << i << " " << j;
EXPECT_THAT(*res.first, j);
EXPECT_EQ(kNumInserts - i - 1, t.size());
}
}
EXPECT_TRUE(t.empty());
}
TEST(Table, LazyEmplace) {
StringTable t;
bool called = false;
auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
called = true;
f("abc", "ABC");
});
EXPECT_TRUE(called);
EXPECT_THAT(*it, Pair("abc", "ABC"));
called = false;
it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
called = true;
f("abc", "DEF");
});
EXPECT_FALSE(called);
EXPECT_THAT(*it, Pair("abc", "ABC"));
}
TEST(Table, ContainsEmpty) {
IntTable t;
EXPECT_FALSE(t.contains(0));
}
TEST(Table, Contains1) {
IntTable t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
EXPECT_FALSE(t.contains(1));
EXPECT_EQ(1, t.erase(0));
EXPECT_FALSE(t.contains(0));
}
TEST(Table, Contains2) {
IntTable t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
EXPECT_FALSE(t.contains(1));
t.clear();
EXPECT_FALSE(t.contains(0));
}
int decompose_constructed;
struct DecomposeType {
DecomposeType(int i) : i(i) { // NOLINT
++decompose_constructed;
}
explicit DecomposeType(const char* d) : DecomposeType(*d) {}
int i;
};
struct DecomposeHash {
using is_transparent = void;
size_t operator()(DecomposeType a) const { return a.i; }
size_t operator()(int a) const { return a; }
size_t operator()(const char* a) const { return *a; }
};
struct DecomposeEq {
using is_transparent = void;
bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
bool operator()(DecomposeType a, int b) const { return a.i == b; }
bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
};
struct DecomposePolicy {
using slot_type = DecomposeType;
using key_type = DecomposeType;
using init_type = DecomposeType;
template <typename T>
static void construct(void*, DecomposeType* slot, T&& v) {
*slot = DecomposeType(std::forward<T>(v));
}
static void destroy(void*, DecomposeType*) {}
static DecomposeType& element(slot_type* slot) { return *slot; }
template <class F, class T>
static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
return std::forward<F>(f)(x, x);
}
};
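// TestDecompose verifies that heterogeneous insert/emplace "decompose"
// their argument: when the hasher and key_equal accept the argument type
// directly (int, or const char* here), probing for an existing key must not
// construct a DecomposeType at all. construct_three says whether emplacing
// "3" (a const char*) is still expected to construct one, which it is
// whenever the functors cannot consume const char* directly.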
template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
DecomposeType elem{0};
const int one = 1;
const char* three_p = "3";
const auto& three = three_p;
raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
decompose_constructed = 0;
int expected_constructed = 0;
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.insert(elem);
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.insert(1);
EXPECT_EQ(++expected_constructed, decompose_constructed);
set1.emplace("3");
EXPECT_EQ(++expected_constructed, decompose_constructed);
EXPECT_EQ(expected_constructed, decompose_constructed);
{ // insert(T&&)
set1.insert(1);
EXPECT_EQ(expected_constructed, decompose_constructed);
}
{ // insert(const T&)
set1.insert(one);
EXPECT_EQ(expected_constructed, decompose_constructed);
}
{ // insert(hint, T&&)
set1.insert(set1.begin(), 1);
EXPECT_EQ(expected_constructed, decompose_constructed);
}
{ // insert(hint, const T&)
set1.insert(set1.begin(), one);
EXPECT_EQ(expected_constructed, decompose_constructed);
}
{ // emplace(...)
set1.emplace(1);
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace("3");
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace(one);
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace(three);
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
}
{ // emplace_hint(...)
set1.emplace_hint(set1.begin(), 1);
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace_hint(set1.begin(), "3");
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace_hint(set1.begin(), one);
EXPECT_EQ(expected_constructed, decompose_constructed);
set1.emplace_hint(set1.begin(), three);
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
}
}
TEST(Table, Decompose) {
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
size_t operator()(DecomposeType a) const { return a.i; }
size_t operator()(int a) const { return a; }
};
struct TransparentEqIntOverload {
bool operator()(DecomposeType a, DecomposeType b) const {
return a.i == b.i;
}
bool operator()(DecomposeType a, int b) const { return a.i == b; }
};
TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}
// Returns the largest m such that a table with m elements has the same number
// of buckets as a table with n elements.
size_t MaxDensitySize(size_t n) {
IntTable t;
t.reserve(n);
for (size_t i = 0; i != n; ++i) t.emplace(i);
const size_t c = t.bucket_count();
while (c == t.bucket_count()) t.emplace(n++);
return t.size() - 1;
}
struct Modulo1000Hash {
size_t operator()(int x) const { return x % 1000; }
};
struct Modulo1000HashTable
: public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
std::allocator<int>> {};
// Test that a rehash without a resize happens when there are many deleted
// slots.
TEST(Table, RehashWithNoResize) {
Modulo1000HashTable t;
// Add keys that all share the same hash (multiples of 1000 under
// Modulo1000Hash) until we have at least kMinFullGroups groups with
// Group::kWidth collisions each, then fill up to MaxDensitySize.
const size_t kMinFullGroups = 7;
std::vector<int> keys;
for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
int k = i * 1000;
t.emplace(k);
keys.push_back(k);
}
const size_t capacity = t.capacity();
// Remove elements from all groups except the first and the last one.
// All elements removed from full groups will be marked as kDeleted.
const size_t erase_begin = Group::kWidth / 2;
const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
for (size_t i = erase_begin; i < erase_end; ++i) {
EXPECT_EQ(1, t.erase(keys[i])) << i;
}
keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
auto last_key = keys.back();
size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
// Make sure that we have to make a lot of probes for the last key.
ASSERT_GT(last_key_num_probes, kMinFullGroups);
int x = 1;
// Insert and erase one element at a time until the in-place rehash happens.
while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
t.emplace(x);
ASSERT_EQ(capacity, t.capacity());
// All elements should be there.
ASSERT_TRUE(t.find(x) != t.end()) << x;
for (const auto& k : keys) {
ASSERT_TRUE(t.find(k) != t.end()) << k;
}
t.erase(x);
++x;
}
}
TEST(Table, InsertEraseStressTest) {
IntTable t;
const size_t kMinElementCount = 250;
std::deque<int> keys;
size_t i = 0;
for (; i < MaxDensitySize(kMinElementCount); ++i) {
t.emplace(i);
keys.push_back(i);
}
const size_t kNumIterations = 1000000;
for (; i < kNumIterations; ++i) {
ASSERT_EQ(1, t.erase(keys.front()));
keys.pop_front();
t.emplace(i);
keys.push_back(i);
}
}
TEST(Table, InsertOverloads) {
StringTable t;
// These should all trigger the insert(init_type) overload.
t.insert({{}, {}});
t.insert({"ABC", {}});
t.insert({"DEF", "!!!"});
EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
Pair("DEF", "!!!")));
}
TEST(Table, LargeTable) {
IntTable t;
for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
}
// Timeout if copy is quadratic as it was in Rust.
TEST(Table, EnsureNonQuadraticAsInRust) {
static const size_t kLargeSize = 1 << 15;
IntTable t;
for (size_t i = 0; i != kLargeSize; ++i) {
t.insert(i);
}
// If this is quadratic, the test will timeout.
IntTable t2;
for (const auto& entry : t) t2.insert(entry);
}
TEST(Table, ClearBug) {
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2;
for (size_t i = 0; i < max_size; ++i) {
t.insert(i);
}
ASSERT_EQ(capacity, t.capacity());
intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
t.clear();
ASSERT_EQ(capacity, t.capacity());
for (size_t i = 0; i < max_size; ++i) {
t.insert(i);
}
ASSERT_EQ(capacity, t.capacity());
intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
// We are checking that original and second are close enough to each other
// that they are probably still in the same group. This is not strictly
// guaranteed.
EXPECT_LT(std::abs(original - second),
capacity * sizeof(IntTable::value_type));
}
TEST(Table, Erase) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_EQ(1, t.size());
t.erase(res.first);
EXPECT_EQ(0, t.size());
EXPECT_TRUE(t.find(0) == t.end());
}
// Collect N bad keys with the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N elements.
// 3. Take the first Group::kWidth - 1 of them into the bad_keys array.
// 4. Clear the table without resizing.
// 5. Repeat from step 2 until N keys have been collected.
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
static constexpr int kGroupSize = Group::kWidth - 1;
auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
for (size_t i = b; i != e; ++i) {
t->emplace(i);
}
std::vector<int64_t> res;
res.reserve(kGroupSize);
auto it = t->begin();
for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
res.push_back(*it);
}
return res;
};
std::vector<int64_t> bad_keys;
bad_keys.reserve(N);
IntTable t;
t.reserve(N * 2);
for (size_t b = 0; bad_keys.size() < N; b += N) {
auto keys = topk_range(b, b + N, &t);
bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
t.erase(t.begin(), t.end());
EXPECT_TRUE(t.empty());
}
return bad_keys;
}
struct ProbeStats {
// Number of elements with specific probe length over all tested tables.
std::vector<size_t> all_probes_histogram;
// Ratios total_probe_length/size for every tested table.
std::vector<double> single_table_ratios;
friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
ProbeStats res = a;
res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
b.all_probes_histogram.size()));
std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
res.all_probes_histogram.begin(),
res.all_probes_histogram.begin(), std::plus<size_t>());
res.single_table_ratios.insert(res.single_table_ratios.end(),
b.single_table_ratios.begin(),
b.single_table_ratios.end());
return res;
}
// Average ratio total_probe_length/size over tables.
double AvgRatio() const {
return std::accumulate(single_table_ratios.begin(),
single_table_ratios.end(), 0.0) /
single_table_ratios.size();
}
// Maximum ratio total_probe_length/size over tables.
double MaxRatio() const {
return *std::max_element(single_table_ratios.begin(),
single_table_ratios.end());
}
// Percentile ratio total_probe_length/size over tables.
double PercentileRatio(double Percentile = 0.95) const {
auto r = single_table_ratios;
auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
if (mid != r.end()) {
std::nth_element(r.begin(), mid, r.end());
return *mid;
} else {
return MaxRatio();
}
}
// Maximum probe length over all elements and all tables.
size_t MaxProbe() const { return all_probes_histogram.size(); }
// Fraction of elements with specified probe length.
std::vector<double> ProbeNormalizedHistogram() const {
double total_elements = std::accumulate(all_probes_histogram.begin(),
all_probes_histogram.end(), 0ull);
std::vector<double> res;
for (size_t p : all_probes_histogram) {
res.push_back(p / total_elements);
}
return res;
}
size_t PercentileProbe(double Percentile = 0.99) const {
size_t idx = 0;
for (double p : ProbeNormalizedHistogram()) {
if (Percentile > p) {
Percentile -= p;
++idx;
} else {
return idx;
}
}
return idx;
}
friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
<< ", PercentileRatio:" << s.PercentileRatio()
<< ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
for (double p : s.ProbeNormalizedHistogram()) {
out << p << ",";
}
out << "]}";
return out;
}
};
struct ExpectedStats {
double avg_ratio;
double max_ratio;
std::vector<std::pair<double, double>> percentile_ratios;
std::vector<std::pair<double, double>> percentile_probes;
friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
<< ", PercentileRatios: [";
for (auto el : s.percentile_ratios) {
out << el.first << ":" << el.second << ", ";
}
out << "], PercentileProbes: [";
for (auto el : s.percentile_probes) {
out << el.first << ":" << el.second << ", ";
}
out << "]}";
return out;
}
};
void VerifyStats(size_t size, const ExpectedStats& exp,
const ProbeStats& stats) {
EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
for (auto pr : exp.percentile_ratios) {
EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
<< size << " " << pr.first << " " << stats;
}
for (auto pr : exp.percentile_probes) {
EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
<< size << " " << pr.first << " " << stats;
}
}
using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
// Collect total ProbeStats over num_iters iterations of the following
// algorithm:
// 1. Create a new table and reserve it to keys.size() * 2.
// 2. Insert all keys, each XORed with a per-iteration seed.
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
size_t num_iters) {
const size_t reserve_size = keys.size() * 2;
ProbeStats stats;
int64_t seed = 0x71b1a19b907d6e33;
while (num_iters--) {
seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
IntTable t1;
t1.reserve(reserve_size);
for (const auto& key : keys) {
t1.emplace(key ^ seed);
}
auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
stats.all_probes_histogram.resize(
std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
std::transform(probe_histogram.begin(), probe_histogram.end(),
stats.all_probes_histogram.begin(),
stats.all_probes_histogram.begin(), std::plus<size_t>());
size_t total_probe_seq_length = 0;
for (size_t i = 0; i < probe_histogram.size(); ++i) {
total_probe_seq_length += i * probe_histogram[i];
}
stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
keys.size());
t1.erase(t1.begin(), t1.end());
}
return stats;
}
ExpectedStats XorSeedExpectedStats() {
constexpr bool kRandomizesInserts =
#ifdef NDEBUG
false;
#else // NDEBUG
true;
#endif // NDEBUG
// The effective load factor is larger in non-opt mode because we insert
// elements out of order.
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
return {0.05,
1.0,
{{0.95, 0.5}},
{{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
} else {
return {0.05,
2.0,
{{0.95, 0.1}},
{{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
}
break;
case 16:
if (kRandomizesInserts) {
return {0.1,
1.0,
{{0.95, 0.1}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
return {0.05,
1.0,
{{0.95, 0.05}},
{{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
}
break;
default:
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
}
return {};
}
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
for (size_t size : sizes) {
stats[size] =
CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
}
auto expected = XorSeedExpectedStats();
for (size_t size : sizes) {
auto& stat = stats[size];
VerifyStats(size, expected, stat);
}
}
// Collect total ProbeStats over num_iters iterations of the following
// algorithm:
// 1. Create a new table.
// 2. Starting at a random offset, take 10% of the keys and for each key
//    insert the 10 elements key * 17 + j * 13 for j in [0, 10).
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
const std::vector<int64_t>& keys, size_t num_iters) {
ProbeStats stats;
std::random_device rd;
std::mt19937 rng(rd());
auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
while (num_iters--) {
IntTable t1;
size_t num_keys = keys.size() / 10;
size_t start = dist(rng);
for (size_t i = 0; i != num_keys; ++i) {
for (size_t j = 0; j != 10; ++j) {
t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
}
}
auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
stats.all_probes_histogram.resize(
std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
std::transform(probe_histogram.begin(), probe_histogram.end(),
stats.all_probes_histogram.begin(),
stats.all_probes_histogram.begin(), std::plus<size_t>());
size_t total_probe_seq_length = 0;
for (size_t i = 0; i < probe_histogram.size(); ++i) {
total_probe_seq_length += i * probe_histogram[i];
}
stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
t1.size());
t1.erase(t1.begin(), t1.end());
}
return stats;
}
ExpectedStats LinearTransformExpectedStats() {
constexpr bool kRandomizesInserts =
#ifdef NDEBUG
false;
#else // NDEBUG
true;
#endif // NDEBUG
// The effective load factor is larger in non-opt mode because we insert
// elements out of order.
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
return {0.1,
0.5,
{{0.95, 0.3}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
return {0.15,
0.5,
{{0.95, 0.3}},
{{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
}
break;
case 16:
if (kRandomizesInserts) {
return {0.1,
0.4,
{{0.95, 0.3}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
return {0.05,
0.2,
{{0.95, 0.1}},
{{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
}
break;
default:
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
}
return {};
}
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
for (size_t size : sizes) {
stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
CollectBadMergeKeys(size), 300);
}
auto expected = LinearTransformExpectedStats();
for (size_t size : sizes) {
auto& stat = stats[size];
VerifyStats(size, expected, stat);
}
}
TEST(Table, EraseCollision) {
BadTable t;
// 1 2 3
t.emplace(1);
t.emplace(2);
t.emplace(3);
EXPECT_THAT(*t.find(1), 1);
EXPECT_THAT(*t.find(2), 2);
EXPECT_THAT(*t.find(3), 3);
EXPECT_EQ(3, t.size());
// 1 DELETED 3
t.erase(t.find(2));
EXPECT_THAT(*t.find(1), 1);
EXPECT_TRUE(t.find(2) == t.end());
EXPECT_THAT(*t.find(3), 3);
EXPECT_EQ(2, t.size());
// DELETED DELETED 3
t.erase(t.find(1));
EXPECT_TRUE(t.find(1) == t.end());
EXPECT_TRUE(t.find(2) == t.end());
EXPECT_THAT(*t.find(3), 3);
EXPECT_EQ(1, t.size());
// DELETED DELETED DELETED
t.erase(t.find(3));
EXPECT_TRUE(t.find(1) == t.end());
EXPECT_TRUE(t.find(2) == t.end());
EXPECT_TRUE(t.find(3) == t.end());
EXPECT_EQ(0, t.size());
}
TEST(Table, EraseInsertProbing) {
BadTable t(100);
// 1 2 3 4
t.emplace(1);
t.emplace(2);
t.emplace(3);
t.emplace(4);
// 1 DELETED 3 DELETED
t.erase(t.find(2));
t.erase(t.find(4));
// 1 10 3 11 12
t.emplace(10);
t.emplace(11);
t.emplace(12);
EXPECT_EQ(5, t.size());
EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}
TEST(Table, Clear) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
t.clear();
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_EQ(1, t.size());
t.clear();
EXPECT_EQ(0, t.size());
EXPECT_TRUE(t.find(0) == t.end());
}
TEST(Table, Swap) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_EQ(1, t.size());
IntTable u;
t.swap(u);
EXPECT_EQ(0, t.size());
EXPECT_EQ(1, u.size());
EXPECT_TRUE(t.find(0) == t.end());
EXPECT_THAT(*u.find(0), 0);
}
TEST(Table, Rehash) {
IntTable t;
EXPECT_TRUE(t.find(0) == t.end());
t.emplace(0);
t.emplace(1);
EXPECT_EQ(2, t.size());
t.rehash(128);
EXPECT_EQ(2, t.size());
EXPECT_THAT(*t.find(0), 0);
EXPECT_THAT(*t.find(1), 1);
}
TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
IntTable t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
t.rehash(1);
EXPECT_EQ(p, &*t.find(0));
}
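// rehash(0) is a special case, exercised by the next three tests: a
// never-allocated table stays unallocated, a table emptied via clear()
// releases its backing array, and a populated table is forced through a
// full rebuild (the same trick the iteration-order tests use further down).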
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
IntTable t;
t.rehash(0);
EXPECT_EQ(0, t.bucket_count());
}
TEST(Table, RehashZeroDeallocatesEmptyTable) {
IntTable t;
t.emplace(0);
t.clear();
EXPECT_NE(0, t.bucket_count());
t.rehash(0);
EXPECT_EQ(0, t.bucket_count());
}
TEST(Table, RehashZeroForcesRehash) {
IntTable t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
t.rehash(0);
EXPECT_NE(p, &*t.find(0));
}
TEST(Table, ConstructFromInitList) {
using P = std::pair<std::string, std::string>;
struct Q {
operator P() const { return {}; }
};
StringTable t = {P(), Q(), {}, {{}, {}}};
}
TEST(Table, CopyConstruct) {
IntTable t;
t.max_load_factor(.321f);
t.emplace(0);
EXPECT_EQ(1, t.size());
{
IntTable u(t);
EXPECT_EQ(1, u.size());
EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u{t};
EXPECT_EQ(1, u.size());
EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u = t;
EXPECT_EQ(1, u.size());
EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
}
TEST(Table, CopyConstructWithAlloc) {
StringTable t;
t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(t, Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
struct ExplicitAllocIntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, Alloc<int64_t>> {
ExplicitAllocIntTable() {}
};
TEST(Table, AllocWithExplicitCtor) {
ExplicitAllocIntTable t;
EXPECT_EQ(0, t.size());
}
TEST(Table, MoveConstruct) {
{
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t));
EXPECT_EQ(1, u.size());
EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u{std::move(t)};
EXPECT_EQ(1, u.size());
EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u = std::move(t);
EXPECT_EQ(1, u.size());
EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
}
TEST(Table, MoveConstructWithAlloc) {
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopyAssign) {
StringTable t;
t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = t;
EXPECT_EQ(1, u.size());
EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopySelfAssign) {
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
t = *&t;  // The deref-of-addressof dance avoids -Wself-assign.
EXPECT_EQ(1, t.size());
EXPECT_EQ(lf, t.max_load_factor());
EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
TEST(Table, MoveAssign) {
StringTable t;
t.max_load_factor(.321f);
const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = std::move(t);
EXPECT_EQ(1, u.size());
EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, Equality) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}};
t.insert(std::begin(v), std::end(v));
StringTable u = t;
EXPECT_EQ(u, t);
}
TEST(Table, Equality2) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
TEST(Table, Equality3) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
TEST(Table, NumDeletedRegression) {
IntTable t;
t.emplace(0);
t.erase(t.find(0));
// construct over a deleted slot.
t.emplace(0);
t.clear();
}
TEST(Table, FindFullDeletedRegression) {
IntTable t;
for (int i = 0; i < 1000; ++i) {
t.emplace(i);
t.erase(t.find(i));
}
EXPECT_EQ(0, t.size());
}
TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
size_t n;
{
// Compute n such that n is the maximum number of elements before rehash.
IntTable t;
t.emplace(0);
size_t c = t.bucket_count();
for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
--n;
}
IntTable t;
t.rehash(n);
const size_t c = t.bucket_count();
for (size_t i = 0; i != n; ++i) t.emplace(i);
EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
t.erase(0);
t.emplace(0);
EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}
TEST(Table, NoThrowMoveConstruct) {
ASSERT_TRUE(
std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
ASSERT_TRUE(std::is_nothrow_copy_constructible<
std::equal_to<absl::string_view>>::value);
ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}
TEST(Table, NoThrowMoveAssign) {
ASSERT_TRUE(
std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
ASSERT_TRUE(
std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
ASSERT_TRUE(
absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}
TEST(Table, NoThrowSwappable) {
ASSERT_TRUE(
container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
ASSERT_TRUE(container_internal::IsNoThrowSwappable<
std::equal_to<absl::string_view>>());
ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}
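// Heterogeneous lookup kicks in only when both the hasher and the key_equal
// declare `using is_transparent = void;`. Without the tag, find() first
// converts its argument to the table's key type (1.1 becomes int64_t{1}
// below); with it, the double overloads are used as-is and 1.1 matches
// nothing.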
TEST(Table, HeterogeneousLookup) {
struct Hash {
size_t operator()(int64_t i) const { return i; }
size_t operator()(double i) const {
ADD_FAILURE();
return i;
}
};
struct Eq {
bool operator()(int64_t a, int64_t b) const { return a == b; }
bool operator()(double a, int64_t b) const {
ADD_FAILURE();
return a == b;
}
bool operator()(int64_t a, double b) const {
ADD_FAILURE();
return a == b;
}
bool operator()(double a, double b) const {
ADD_FAILURE();
return a == b;
}
};
struct THash {
using is_transparent = void;
size_t operator()(int64_t i) const { return i; }
size_t operator()(double i) const { return i; }
};
struct TEq {
using is_transparent = void;
bool operator()(int64_t a, int64_t b) const { return a == b; }
bool operator()(double a, int64_t b) const { return a == b; }
bool operator()(int64_t a, double b) const { return a == b; }
bool operator()(double a, double b) const { return a == b; }
};
raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
// It will convert to int64_t before the query.
EXPECT_EQ(1, *s.find(double{1.1}));
raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
// It will try to use the double, and fail to find the object.
EXPECT_TRUE(ts.find(1.1) == ts.end());
}
template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));
template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));
template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));
template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));
template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};
template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
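// A standard detection idiom: C<Table> (e.g. CallFind) is well-formed only
// if the corresponding member call compiles with an int argument, and
// VerifyResultOf maps that through absl::void_t onto true_type/false_type.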
TEST(Table, HeterogeneousLookupOverloads) {
using NonTransparentTable =
raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
std::equal_to<absl::string_view>, std::allocator<int>>;
EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
using TransparentTable = raw_hash_set<
StringPolicy,
absl::container_internal::hash_default_hash<absl::string_view>,
absl::container_internal::hash_default_eq<absl::string_view>,
std::allocator<int>>;
EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}
// TODO(alkis): Expand iterator tests.
TEST(Iterator, IsDefaultConstructible) {
StringTable::iterator i;
EXPECT_TRUE(i == StringTable::iterator());
}
TEST(ConstIterator, IsDefaultConstructible) {
StringTable::const_iterator i;
EXPECT_TRUE(i == StringTable::const_iterator());
}
TEST(Iterator, ConvertsToConstIterator) {
StringTable::iterator i;
EXPECT_TRUE(i == StringTable::const_iterator());
}
TEST(Iterator, Iterates) {
IntTable t;
for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
}
TEST(Table, Merge) {
StringTable t1, t2;
t1.emplace("0", "-0");
t1.emplace("1", "-1");
t2.emplace("0", "~0");
t2.emplace("2", "~2");
EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
t1.merge(t2);
EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
Pair("2", "~2")));
EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
TEST(Nodes, EmptyNodeType) {
using node_type = StringTable::node_type;
node_type n;
EXPECT_FALSE(n);
EXPECT_TRUE(n.empty());
EXPECT_TRUE((std::is_same<node_type::allocator_type,
StringTable::allocator_type>::value));
}
TEST(Nodes, ExtractInsert) {
constexpr char k0[] = "Very long string zero.";
constexpr char k1[] = "Very long string one.";
constexpr char k2[] = "Very long string two.";
StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
EXPECT_THAT(t,
UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
auto node = t.extract(k0);
EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
EXPECT_TRUE(node);
EXPECT_FALSE(node.empty());
StringTable t2;
auto res = t2.insert(std::move(node));
EXPECT_TRUE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_FALSE(res.node);
EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
// Not there.
EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
node = t.extract("Not there!");
EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
EXPECT_FALSE(node);
// Inserting nothing.
res = t2.insert(std::move(node));
EXPECT_FALSE(res.inserted);
EXPECT_EQ(res.position, t2.end());
EXPECT_FALSE(res.node);
EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
t.emplace(k0, "1");
node = t.extract(k0);
// Insert duplicate.
res = t2.insert(std::move(node));
EXPECT_FALSE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_TRUE(res.node);
EXPECT_FALSE(node);
}
StringTable MakeSimpleTable(size_t size) {
StringTable t;
for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), "");
return t;
}
std::string OrderOfIteration(const StringTable& t) {
std::string order;
for (auto& p : t) order += p.first;
return order;
}
TEST(Table, IterationOrderChangesByInstance) {
// Needs to be more than kWidth elements to be able to affect order.
const StringTable reference = MakeSimpleTable(20);
// Since order is non-deterministic we can't just try once and verify.
// We'll try until we find that order changed. It should not take many tries
// for that.
// Important: we have to keep the old tables around. Otherwise tcmalloc will
// just give us the same blocks and we would be doing the same order again.
std::vector<StringTable> garbage;
for (int i = 0; i < 10; ++i) {
auto trial = MakeSimpleTable(20);
if (OrderOfIteration(trial) != OrderOfIteration(reference)) {
// We are done.
return;
}
garbage.push_back(std::move(trial));
}
FAIL();
}
TEST(Table, IterationOrderChangesOnRehash) {
// Since order is non-deterministic we can't just try once and verify.
// We'll try until we find that order changed. It should not take many tries
// for that.
// Important: we have to keep the old tables around. Otherwise tcmalloc will
// just give us the same blocks and we would be doing the same order again.
std::vector<StringTable> garbage;
for (int i = 0; i < 10; ++i) {
// Needs to be more than kWidth elements to be able to affect order.
StringTable t = MakeSimpleTable(20);
const std::string reference = OrderOfIteration(t);
// Force rehash to the same size.
t.rehash(0);
std::string trial = OrderOfIteration(t);
if (trial != reference) {
// We are done.
return;
}
garbage.push_back(std::move(t));
}
FAIL();
}
TEST(Table, IterationOrderChangesForSmallTables) {
// Since order is non-deterministic we can't just try once and verify.
// We'll try until we find that order changed.
// Important: we have to keep the old tables around. Otherwise tcmalloc will
// just give us the same blocks and we would be doing the same order again.
StringTable reference_table = MakeSimpleTable(5);
const std::string reference = OrderOfIteration(reference_table);
std::vector<StringTable> garbage;
for (int i = 0; i < 50; ++i) {
StringTable t = MakeSimpleTable(5);
std::string trial = OrderOfIteration(t);
if (trial != reference) {
// We are done.
return;
}
garbage.push_back(std::move(t));
}
FAIL() << "Iteration order remained the same across many attempts.";
}
// Fill the table to 3 different load factors (min, median, max) and evaluate
// the percentage of perfect hits using the debug API.
template <class Table, class AddFn>
std::vector<double> CollectPerfectRatios(Table t, AddFn add) {
using Key = typename Table::key_type;
// First, fill enough to have a good distribution.
constexpr size_t kMinSize = 10000;
std::vector<Key> keys;
while (t.size() < kMinSize) keys.push_back(add(t));
// Then insert until the table rehashes, leaving it at its minimum load
// factor.
double lf = t.load_factor();
while (lf <= t.load_factor()) keys.push_back(add(t));
// We are now at min load factor. Take a snapshot.
size_t perfect = 0;
auto update_perfect = [&](Key k) {
perfect += GetHashtableDebugNumProbes(t, k) == 0;
};
for (const auto& k : keys) update_perfect(k);
std::vector<double> perfect_ratios;
// Keep inserting and sampling until we pass the maximum load factor: the
// first loop walks the load factor up to ~.6, the second keeps going until
// the next rehash drops it back below .5 (i.e. just past the maximum).
while (t.load_factor() < .6) {
perfect_ratios.push_back(1.0 * perfect / t.size());
update_perfect(add(t));
}
while (t.load_factor() > .5) {
perfect_ratios.push_back(1.0 * perfect / t.size());
update_perfect(add(t));
}
return perfect_ratios;
}
std::vector<std::pair<double, double>> StringTablePerfectRatios() {
constexpr bool kRandomizesInserts =
#ifdef NDEBUG
false;
#else // NDEBUG
true;
#endif // NDEBUG
// The effective load factor is larger in non-opt mode because we insert
// elements out of order.
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
return {{0.986, 0.02}, {0.95, 0.02}, {0.89, 0.02}};
} else {
return {{0.995, 0.01}, {0.97, 0.01}, {0.89, 0.02}};
}
break;
case 16:
if (kRandomizesInserts) {
return {{0.973, 0.01}, {0.965, 0.01}, {0.92, 0.02}};
} else {
return {{0.995, 0.005}, {0.99, 0.005}, {0.94, 0.01}};
}
break;
default:
// Ignore anything else.
return {};
}
}
// This is almost a change detector, but it allows us to know how we are
// affecting the probe distribution.
TEST(Table, EffectiveLoadFactorStrings) {
std::vector<double> perfect_ratios =
CollectPerfectRatios(StringTable(), [](StringTable& t) {
return t.emplace(std::to_string(t.size()), "").first->first;
});
auto ratios = StringTablePerfectRatios();
if (ratios.empty()) return;
EXPECT_THAT(perfect_ratios.front(),
DoubleNear(ratios[0].first, ratios[0].second));
EXPECT_THAT(perfect_ratios[perfect_ratios.size() / 2],
DoubleNear(ratios[1].first, ratios[1].second));
EXPECT_THAT(perfect_ratios.back(),
DoubleNear(ratios[2].first, ratios[2].second));
}
std::vector<std::pair<double, double>> IntTablePerfectRatios() {
constexpr bool kRandomizesInserts =
#ifdef NDEBUG
false;
#else // NDEBUG
true;
#endif // NDEBUG
// The effective load factor is larger in non-opt mode because we insert
// elements out of order.
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
return {{0.99, 0.02}, {0.985, 0.02}, {0.95, 0.05}};
} else {
return {{0.99, 0.01}, {0.99, 0.01}, {0.95, 0.02}};
}
break;
case 16:
if (kRandomizesInserts) {
return {{0.98, 0.02}, {0.978, 0.02}, {0.96, 0.02}};
} else {
return {{0.998, 0.003}, {0.995, 0.01}, {0.975, 0.02}};
}
break;
default:
// Ignore anything else.
return {};
}
}
// This is almost a change detector, but it allows us to know how we are
// affecting the probe distribution.
TEST(Table, EffectiveLoadFactorInts) {
std::vector<double> perfect_ratios = CollectPerfectRatios(
IntTable(), [](IntTable& t) { return *t.emplace(t.size()).first; });
auto ratios = IntTablePerfectRatios();
if (ratios.empty()) return;
EXPECT_THAT(perfect_ratios.front(),
DoubleNear(ratios[0].first, ratios[0].second));
EXPECT_THAT(perfect_ratios[perfect_ratios.size() / 2],
DoubleNear(ratios[1].first, ratios[1].second));
EXPECT_THAT(perfect_ratios.back(),
DoubleNear(ratios[2].first, ratios[2].second));
}
// Confirm that we assert if we try to erase() end().
TEST(Table, EraseOfEndAsserts) {
// Use an assert with side-effects to figure out if they are actually enabled.
bool assert_enabled = false;
assert([&]() {
assert_enabled = true;
return true;
}());
if (!assert_enabled) return;
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
constexpr char kDeathMsg[] = "it != end";
EXPECT_DEATH(t.erase(t.end()), kDeathMsg);
}
#ifdef ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
// Insert something to force an allocation.
int64_t& v1 = *t.insert(0).first;
// Make sure there is something to test.
ASSERT_GT(t.capacity(), 1);
int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
for (size_t i = 0; i < t.capacity(); ++i) {
EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
}
}
TEST(Sanitizer, PoisoningOnErase) {
IntTable t;
int64_t& v = *t.insert(0).first;
EXPECT_FALSE(__asan_address_is_poisoned(&v));
t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif // ADDRESS_SANITIZER
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
#include <stddef.h>
#include <memory>
#include <utility>
namespace absl {
namespace container_internal {
// A class that tracks its copies and moves so that it can be queried in tests.
template <class T>
class Tracked {
public:
Tracked() {}
// NOLINTNEXTLINE(runtime/explicit)
Tracked(const T& val) : val_(val) {}
Tracked(const Tracked& that)
: val_(that.val_),
num_moves_(that.num_moves_),
num_copies_(that.num_copies_) {
++(*num_copies_);
}
Tracked(Tracked&& that)
: val_(std::move(that.val_)),
num_moves_(std::move(that.num_moves_)),
num_copies_(std::move(that.num_copies_)) {
++(*num_moves_);
}
Tracked& operator=(const Tracked& that) {
val_ = that.val_;
num_moves_ = that.num_moves_;
num_copies_ = that.num_copies_;
++(*num_copies_);
return *this;
}
Tracked& operator=(Tracked&& that) {
val_ = std::move(that.val_);
num_moves_ = std::move(that.num_moves_);
num_copies_ = std::move(that.num_copies_);
++(*num_moves_);
return *this;
}
const T& val() const { return val_; }
friend bool operator==(const Tracked& a, const Tracked& b) {
return a.val_ == b.val_;
}
friend bool operator!=(const Tracked& a, const Tracked& b) {
return !(a == b);
}
size_t num_copies() { return *num_copies_; }
size_t num_moves() { return *num_moves_; }
private:
T val_;
std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
};
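// A minimal usage sketch (illustrative only):
//
//   Tracked<int> a(7);
//   Tracked<int> b = a;             // a and b now share counters; copies == 1
//   Tracked<int> c = std::move(b);  // moves == 1, observable from a, b, c
//
// Because the counters live behind shared_ptrs, every instance in a
// copy/move chain reports the same totals.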
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
#include <algorithm>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
TYPED_TEST_CASE_P(ConstructorTest);
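// These are gtest typed parameterized tests: every TYPED_TEST_P below runs
// once per container type, with the concrete types (e.g. flat_hash_map and
// a std::unordered_map baseline) plugged in elsewhere via
// REGISTER_TYPED_TEST_CASE_P / INSTANTIATE_TYPED_TEST_CASE_P.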
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
}
TYPED_TEST_P(ConstructorTest, BucketCount) {
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHash) {
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, BucketAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
#endif
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
#endif
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
TYPED_TEST_P(ConstructorTest, MoveConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
#endif
}
// TODO(alkis): Test non-propagating allocators on move constructors.
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, Assignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}
// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).
TYPED_TEST_P(ConstructorTest, MoveAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m;  // Avoid -Wself-assign.
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
// We cannot test self-move, as the standard states that it leaves standard
// containers in an unspecified state (and in practice it causes a memory leak
// according to the heap checker!).
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
MoveAssignment, AssignmentFromInitializerList,
AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordMap>
class LookupTest : public ::testing::Test {};
TYPED_TEST_CASE_P(LookupTest);
TYPED_TEST_P(LookupTest, At) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
for (const auto& p : values) {
const auto& val = m.at(p.first);
EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
}
}
TYPED_TEST_P(LookupTest, OperatorBracket) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values) {
auto& val = m[p.first];
EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
val = p.second;
}
for (const auto& p : values)
EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
}
TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values)
EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
}
TYPED_TEST_P(LookupTest, Find) {
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_TRUE(m.end() == m.find(p.first))
<< ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values) {
auto it = m.find(p.first);
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
}
}
TYPED_TEST_P(LookupTest, EqualRange) {
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values) {
auto r = m.equal_range(p.first);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& p : values) {
auto r = m.equal_range(p.first);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
}
}
REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
EqualRange);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordMap>
class ModifiersTest : public ::testing::Test {};
TYPED_TEST_CASE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}
TYPED_TEST_P(ModifiersTest, Insert) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.insert(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}
TYPED_TEST_P(ModifiersTest, InsertHint) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.insert(it, val2);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}
TYPED_TEST_P(ModifiersTest, InsertRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
#ifdef UNORDERED_MAP_CXX17
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto p = m.insert_or_assign(k, val);
EXPECT_TRUE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val, get<1>(*p.first));
V val2 = hash_internal::Generator<V>()();
p = m.insert_or_assign(k, val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val2, get<1>(*p.first));
#endif
}
TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
#ifdef UNORDERED_MAP_CXX17
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto it = m.insert_or_assign(m.end(), k, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val, get<1>(*it));
V val2 = hash_internal::Generator<V>()();
it = m.insert_or_assign(it, k, val2);
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val2, get<1>(*it));
#endif
}
TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.emplace(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}
TYPED_TEST_P(ModifiersTest, EmplaceHint) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.emplace_hint(it, val2);
EXPECT_EQ(val, *it);
}
TYPED_TEST_P(ModifiersTest, TryEmplace) {
#ifdef UNORDERED_MAP_CXX17
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.try_emplace(val.first, val.second);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.try_emplace(val2.first, val2.second);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
#endif
}
TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
#ifdef UNORDERED_MAP_CXX17
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.try_emplace(m.end(), val.first, val.second);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.try_emplace(it, val2.first, val2.second);
EXPECT_EQ(val, *it);
#endif
}
template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
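// `EraseFirst` dispatches on the return type of `erase(iterator)`: the `int`
// overload participates only when erase returns an iterator (enforced by
// `IfNotVoid`) and is preferred during overload resolution; the `...`
// fallback emulates the missing return value by post-incrementing a copy of
// `begin()` before erasing.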
struct EraseFirst {
template <class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))> {
return m->erase(m->begin());
}
template <class Map>
typename Map::iterator operator()(Map* m, ...) const {
auto it = m->begin();
m->erase(it++);
return it;
}
};
TYPED_TEST_P(ModifiersTest, Erase) {
using T = hash_internal::GeneratedType<TypeParam>;
using std::get;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto& first = *m.begin();
std::vector<T> values2;
for (const auto& val : values)
if (get<0>(val) != get<0>(first)) values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
values2.end()));
}
TYPED_TEST_P(ModifiersTest, EraseRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}
TYPED_TEST_P(ModifiersTest, EraseKey) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0].first));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
values.end()));
}
TYPED_TEST_P(ModifiersTest, Swap) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
}
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, InsertOrAssign, InsertOrAssignHint,
Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unordered_map>
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
namespace container_internal {
namespace {
using MapTypes = ::testing::Types<
std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const int, int>>>,
std::unordered_map<std::string, std::string, StatefulTestingHash,
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ConstructorTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, LookupTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ModifiersTest, MapTypes);
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
#include <algorithm>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
TYPED_TEST_CASE_P(ConstructorTest);
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
}
TYPED_TEST_P(ConstructorTest, BucketCount) {
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHash) {
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
const auto& cm = m;
EXPECT_EQ(cm.hash_function(), hasher);
EXPECT_EQ(cm.key_eq(), equal);
EXPECT_EQ(cm.get_allocator(), alloc);
EXPECT_TRUE(cm.empty());
EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
EXPECT_GE(cm.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, BucketAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
#endif
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
#endif
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
TYPED_TEST_P(ConstructorTest, MoveConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
#endif
}
// TODO(alkis): Test non-propagating allocators on move constructors.
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}
TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
#endif
}
TYPED_TEST_P(ConstructorTest, Assignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}
// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).
TYPED_TEST_P(ConstructorTest, MoveAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign.
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
MoveAssignment, AssignmentFromInitializerList,
AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordSet>
class LookupTest : public ::testing::Test {};
TYPED_TEST_CASE_P(LookupTest);
TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values)
EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
}
TYPED_TEST_P(LookupTest, Find) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values) {
typename TypeParam::iterator it = m.find(v);
static_assert(std::is_same<const typename TypeParam::value_type&,
decltype(*it)>::value,
"");
static_assert(std::is_same<const typename TypeParam::value_type*,
decltype(it.operator->())>::value,
"");
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
}
}
TYPED_TEST_P(LookupTest, EqualRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values) {
auto r = m.equal_range(v);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& v : values) {
auto r = m.equal_range(v);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(v, *r.first);
}
}
REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
namespace container_internal {
template <class UnordSet>
class ModifiersTest : public ::testing::Test {};
TYPED_TEST_CASE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}
TYPED_TEST_P(ModifiersTest, Insert) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.insert(val);
EXPECT_FALSE(p.second);
}
TYPED_TEST_P(ModifiersTest, InsertHint) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
it = m.insert(it, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}
TYPED_TEST_P(ModifiersTest, InsertRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.emplace(val);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}
TYPED_TEST_P(ModifiersTest, EmplaceHint) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
it = m.emplace_hint(it, val);
EXPECT_EQ(val, *it);
}
template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
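// `EraseFirst` prefers the `int` overload, which is viable only when
// `erase(iterator)` returns an iterator; the `...` fallback recovers the next
// iterator by post-incrementing a copy of `begin()` before erasing.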
struct EraseFirst {
template <class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))> {
return m->erase(m->begin());
}
template <class Map>
typename Map::iterator operator()(Map* m, ...) const {
auto it = m->begin();
m->erase(it++);
return it;
}
};
TYPED_TEST_P(ModifiersTest, Erase) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
std::vector<T> values2;
for (const auto& val : values)
if (val != *m.begin()) values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
values2.end()));
}
TYPED_TEST_P(ModifiersTest, EraseRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}
TYPED_TEST_P(ModifiersTest, EraseKey) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0]));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
values.end()));
}
TYPED_TEST_P(ModifiersTest, Swap) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
}
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
EraseKey, Swap);
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unordered_set>
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
namespace absl {
namespace container_internal {
namespace {
using SetTypes =
::testing::Types<std::unordered_set<int, StatefulTestingHash,
StatefulTestingEqual, Alloc<int>>,
std::unordered_set<std::string, StatefulTestingHash,
StatefulTestingEqual, Alloc<std::string>>>;
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ConstructorTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, LookupTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ModifiersTest, SetTypes);
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: node_hash_map.h
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_map<K, V>` is an unordered associative container of
// unique keys and associated values designed to be a more efficient replacement
// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
// deletion of map elements can be done as an `O(1)` operation. However,
// `node_hash_map` (and the other unordered associative containers known
// collectively as the Abseil "Swiss tables") contains other optimizations that
// result in both memory and computation advantages.
//
// In most cases, your default choice for a hash map should be a map of type
// `flat_hash_map`. However, if you need pointer stability and cannot store
// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a
// valid alternative. Likewise, if you are migrating your code from
// `std::unordered_map`, a `node_hash_map` provides a more straightforward
// migration, because it guarantees pointer stability. Consider migrating to
// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map`
// upon further review.
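//
// As a hedged illustration of the pointer-stability guarantee noted above:
//
//   absl::node_hash_map<int, std::string> m = {{1, "one"}};
//   std::string* p = &m[1];                      // Pointer into the map.
//   for (int i = 2; i != 100; ++i) m[i] = "x";   // Likely forces rehashing.
//   assert(*p == "one");  // Still valid: each value lives in its own node.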
#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
#define ABSL_CONTAINER_NODE_HASH_MAP_H_
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/node_hash_policy.h"
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::node_hash_map
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_map<K, V>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
// the following notable differences:
//
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
//   `insert()`, provided that the map is given a compatible heterogeneous
//   hashing function and equality operator (see the sketch after this list).
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash map.
// * Returns `void` from the `erase(iterator)` overload.
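//
// A minimal sketch of heterogeneous lookup (assuming the default hasher and
// equality for `std::string` keys, which in Abseil transparently accept
// `absl::string_view`):
//
//   absl::node_hash_map<std::string, int> m = {{"key", 1}};
//   absl::string_view sv = "key";
//   auto it = m.find(sv);  // No temporary std::string is constructed.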
//
// By default, `node_hash_map` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `node_hash_map`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Example:
//
// // Create a node hash map of three strings (that map to strings)
// absl::node_hash_map<std::string, std::string> ducks =
// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
//
// // Insert a new element into the node hash map
//   ducks.insert({"d", "donald"});
//
// // Force a rehash of the node hash map
// ducks.rehash(0);
//
// // Find the element with the key "b"
// std::string search_key = "b";
// auto result = ducks.find(search_key);
// if (result != ducks.end()) {
//     std::cout << "Result: " << result->second << std::endl;
// }
template <class Key, class Value,
class Hash = absl::container_internal::hash_default_hash<Key>,
class Eq = absl::container_internal::hash_default_eq<Key>,
class Alloc = std::allocator<std::pair<const Key, Value>>>
class node_hash_map
: public absl::container_internal::raw_hash_map<
absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
Alloc> {
using Base = typename node_hash_map::raw_hash_map;
public:
node_hash_map() {}
using Base::Base;
// node_hash_map::begin()
//
// Returns an iterator to the beginning of the `node_hash_map`.
using Base::begin;
// node_hash_map::cbegin()
//
// Returns a const iterator to the beginning of the `node_hash_map`.
using Base::cbegin;
// node_hash_map::cend()
//
// Returns a const iterator to the end of the `node_hash_map`.
using Base::cend;
// node_hash_map::end()
//
// Returns an iterator to the end of the `node_hash_map`.
using Base::end;
// node_hash_map::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `node_hash_map`.
//
// NOTE: this member function is particular to `absl::node_hash_map` and is
// not provided in the `std::unordered_map` API.
using Base::capacity;
// node_hash_map::empty()
//
// Returns whether or not the `node_hash_map` is empty.
using Base::empty;
// node_hash_map::max_size()
//
// Returns the largest theoretically possible number of elements within a
// `node_hash_map` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `node_hash_map<K, V>`.
using Base::max_size;
// node_hash_map::size()
//
// Returns the number of elements currently within the `node_hash_map`.
using Base::size;
// node_hash_map::clear()
//
// Removes all elements from the `node_hash_map`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer, call `erase(begin(), end())`.
using Base::clear;
// node_hash_map::erase()
//
// Erases elements within the `node_hash_map`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
//   Erases the element at `pos` of the `node_hash_map`, returning
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
// general and `std::unordered_map` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
//   Erases the elements in the half-open interval [`first`, `last`),
//   returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
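//
// Because `erase(iterator)` returns `void`, the usual erase-while-iterating
// pattern uses a post-increment, as in this hedged sketch (`ShouldErase` is a
// hypothetical predicate):
//
//   for (auto it = m.begin(); it != m.end();) {
//     if (ShouldErase(*it)) {
//       m.erase(it++);  // Advance past the element before it is erased.
//     } else {
//       ++it;
//     }
//   }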
using Base::erase;
// node_hash_map::insert()
//
// Inserts an element of the specified value into the `node_hash_map`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const init_type& value):
//
// Inserts a value into the `node_hash_map`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a `bool` denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//   std::pair<iterator,bool> insert(init_type&& value):
//
// Inserts a moveable value into the `node_hash_map`. Returns a `std::pair`
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a `bool` denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const init_type& value):
// iterator insert(const_iterator hint, T&& value):
//   iterator insert(const_iterator hint, init_type&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
//   void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `node_hash_map` we guarantee the
// first match is inserted.
//
//   void insert(std::initializer_list<init_type> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `node_hash_map` we guarantee the first match is inserted.
using Base::insert;
// node_hash_map::insert_or_assign()
//
// Inserts an element of the specified value into the `node_hash_map` provided
// that a value with the given key does not already exist, or replaces it with
// the element value if a key for that value already exists, returning an
// iterator pointing to the newly inserted element. If rehashing occurs due to
// the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
// std::pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `node_hash_map`.
//
// iterator insert_or_assign(const_iterator hint,
// const init_type& k, T&& obj):
// iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `node_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
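//
// A brief sketch of the first overload:
//
//   absl::node_hash_map<std::string, int> m;
//   m.insert_or_assign("a", 1);  // Inserts {"a", 1}; returns {it, true}.
//   m.insert_or_assign("a", 2);  // Assigns 2 to "a"; returns {it, false}.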
using Base::insert_or_assign;
// node_hash_map::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_map`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// node_hash_map::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_map`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// node_hash_map::try_emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_map`, provided that no element with the given key
// already exists. Unlike `emplace()`, if an element with the given key
// already exists, we guarantee that no element is constructed.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
// Overloads are listed below.
//
// std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
// std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `node_hash_map`.
//
// iterator try_emplace(const_iterator hint,
//                        const key_type& k, Args&&... args):
//   iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `node_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
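//
// A small sketch contrasting `try_emplace()` with `emplace()` for a move-only
// mapped type (illustrative, not exhaustive):
//
//   absl::node_hash_map<int, std::unique_ptr<int>> m;
//   m.try_emplace(1, absl::make_unique<int>(7));  // Inserted.
//   auto p = absl::make_unique<int>(9);
//   m.try_emplace(1, std::move(p));  // Key exists: nothing is constructed
//                                    // and `p` is left unmoved.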
using Base::try_emplace;
// node_hash_map::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
//   Extracts the key-value pair of the element at the indicated position and
// returns a node handle owning that extracted data.
//
// node_type extract(const key_type& x):
//
//   Extracts the key-value pair of the element with a key matching the passed
// key value and returns a node handle owning that extracted data. If the
// `node_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
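//
// For instance (a hedged sketch of node-handle use):
//
//   absl::node_hash_map<std::string, int> m = {{"a", 1}, {"b", 2}};
//   auto node = m.extract("a");  // `m` now holds only {"b", 2}.
//   if (!node.empty()) {
//     // node.key() == "a", node.mapped() == 1; the node may be re-inserted
//     // into another node_hash_map via `insert(std::move(node))`.
//   }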
using Base::extract;
// node_hash_map::merge()
//
// Extracts elements from a given `source` node hash map into this
// `node_hash_map`. If the destination `node_hash_map` already contains an
// element with an equivalent key, that element is not extracted.
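//
// For example (illustrative only):
//
//   absl::node_hash_map<int, std::string> src = {{1, "one"}, {2, "two"}};
//   absl::node_hash_map<int, std::string> dst = {{2, "TWO"}};
//   dst.merge(src);  // Moves {1, "one"} into `dst`; {2, "two"} remains in
//                    // `src` because `dst` already contains key 2.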
using Base::merge;
// node_hash_map::swap(node_hash_map& other)
//
// Exchanges the contents of this `node_hash_map` with those of the `other`
// node hash map, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_map` remain valid, except
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash map's hashing and key equivalence
// functions be Swappable; they are exchanged using unqualified calls to
// non-member `swap()`. If the map's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// node_hash_map::rehash(count)
//
// Rehashes the `node_hash_map`, setting the number of slots to be at least
// the passed value. If the new number of slots would cause the load factor to
// exceed the current maximum load factor (that is, if
// `count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
using Base::rehash;
// node_hash_map::reserve(count)
//
// Sets the number of slots in the `node_hash_map` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;
// node_hash_map::at()
//
// Returns a reference to the mapped value of the element with key equivalent
// to the passed key.
using Base::at;
// node_hash_map::contains()
//
// Determines whether an element with a key comparing equal to the given `key`
// exists within the `node_hash_map`, returning `true` if so or `false`
// otherwise.
using Base::contains;
// node_hash_map::count(const Key& key) const
//
// Returns the number of elements with a key comparing equal to the given
// `key` within the `node_hash_map`. Note that this function will return
// either `1` or `0` since duplicate keys are not allowed within a
// `node_hash_map`.
using Base::count;
// node_hash_map::equal_range()
//
// Returns a range, defined by a `std::pair` of two iterators, containing all
// elements with the passed key in the
// `node_hash_map`.
using Base::equal_range;
// node_hash_map::find()
//
// Finds an element with the passed `key` within the `node_hash_map`.
using Base::find;
// node_hash_map::operator[]()
//
// Returns a reference to the value mapped to the passed key within the
// `node_hash_map`, performing an `insert()` if the key does not already
// exist. If an insertion occurs and results in a rehashing of the container,
// all iterators are invalidated. Otherwise iterators are not affected and
// references are not invalidated. Overloads are listed below.
//
//   T& operator[](const Key& key):
//
// Inserts an init_type object constructed in-place if the element with the
// given key does not exist.
//
//   T& operator[](Key&& key):
//
// Inserts an init_type object constructed in-place provided that an element
// with the given key does not exist.
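//
// Example (a minimal sketch; the map `m` and its keys are illustrative only):
//
//   absl::node_hash_map<std::string, int> m;
//   m["a"] = 1;   // inserts {"a", 1}
//   m["a"] += 1;  // "a" already exists: no insertion; the value becomes 2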
using Base::operator[];
// node_hash_map::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_map`.
using Base::bucket_count;
// node_hash_map::load_factor()
//
// Returns the current load factor of the `node_hash_map` (the average number
// of slots occupied with a value within the hash map).
using Base::load_factor;
// node_hash_map::max_load_factor()
//
// Manages the maximum load factor of the `node_hash_map`. Overloads are
// listed below.
//
// float node_hash_map::max_load_factor()
//
// Returns the current maximum load factor of the `node_hash_map`.
//
// void node_hash_map::max_load_factor(float ml)
//
// Sets the maximum load factor of the `node_hash_map` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `node_hash_map` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// node_hash_map::get_allocator()
//
// Returns the allocator associated with this `node_hash_map`.
using Base::get_allocator;
// node_hash_map::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `node_hash_map`.
using Base::hash_function;
// node_hash_map::key_eq()
//
// Returns the function used for comparing key equality.
using Base::key_eq;
ABSL_DEPRECATED("Call `hash_function()` instead.")
typename Base::hasher hash_funct() { return this->hash_function(); }
ABSL_DEPRECATED("Call `rehash()` instead.")
void resize(typename Base::size_type hint) { this->rehash(hint); }
};
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy
: public absl::container_internal::node_hash_policy<
std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
using value_type = std::pair<const Key, Value>;
public:
using key_type = Key;
using mapped_type = Value;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
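// Allocates storage for a single key/value pair using a rebound copy of the
// map's allocator and constructs the pair in place. The table slots store
// only pointers to these nodes, which is what gives `node_hash_map` its
// pointer stability.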
template <class Allocator, class... Args>
static value_type* new_element(Allocator* alloc, Args&&... args) {
using PairAlloc = typename absl::allocator_traits<
Allocator>::template rebind_alloc<value_type>;
PairAlloc pair_alloc(*alloc);
value_type* res =
absl::allocator_traits<PairAlloc>::allocate(pair_alloc, 1);
absl::allocator_traits<PairAlloc>::construct(pair_alloc, res,
std::forward<Args>(args)...);
return res;
}
template <class Allocator>
static void delete_element(Allocator* alloc, value_type* pair) {
using PairAlloc = typename absl::allocator_traits<
Allocator>::template rebind_alloc<value_type>;
PairAlloc pair_alloc(*alloc);
absl::allocator_traits<PairAlloc>::destroy(pair_alloc, pair);
absl::allocator_traits<PairAlloc>::deallocate(pair_alloc, pair, 1);
}
template <class F, class... Args>
static decltype(absl::container_internal::DecomposePair(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposePair(std::forward<F>(f),
std::forward<Args>(args)...);
}
static size_t element_space_used(const value_type*) {
return sizeof(value_type);
}
static Value& value(value_type* elem) { return elem->second; }
static const Value& value(const value_type* elem) { return elem->second; }
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/node_hash_map.h"
#include "absl/container/internal/tracked.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
namespace container_internal {
namespace {
using ::testing::Field;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using MapTypes = ::testing::Types<
absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const int, int>>>,
absl::node_hash_map<std::string, std::string, StatefulTestingHash,
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ConstructorTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, LookupTest, MapTypes);
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ModifiersTest, MapTypes);
using M = absl::node_hash_map<std::string, Tracked<int>>;
TEST(NodeHashMap, Emplace) {
M m;
Tracked<int> t(53);
m.emplace("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
m.emplace(std::string("a"), t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
std::string a("a");
m.emplace(a, t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
const std::string ca("a");
m.emplace(ca, t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
m.emplace(std::make_pair("a", t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(2, t.num_copies());
m.emplace(std::make_pair(std::string("a"), t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(3, t.num_copies());
std::pair<std::string, Tracked<int>> p("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(4, t.num_copies());
m.emplace(p);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(4, t.num_copies());
const std::pair<std::string, Tracked<int>> cp("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(5, t.num_copies());
m.emplace(cp);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(5, t.num_copies());
std::pair<const std::string, Tracked<int>> pc("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(6, t.num_copies());
m.emplace(pc);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(6, t.num_copies());
const std::pair<const std::string, Tracked<int>> cpc("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(cpc);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(std::piecewise_construct, std::forward_as_tuple("a"),
std::forward_as_tuple(t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(std::piecewise_construct, std::forward_as_tuple(std::string("a")),
std::forward_as_tuple(t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
}
TEST(NodeHashMap, AssignRecursive) {
struct Tree {
// Verify that unordered_map<K, IncompleteType> can be instantiated.
absl::node_hash_map<int, Tree> children;
};
Tree root;
const Tree& child = root.children.emplace().first->second;
// Verify that `lhs = rhs` doesn't read rhs after clearing lhs.
root = child;
}
TEST(NodeHashMap, MoveOnlyKey) {
struct Key {
Key() = default;
Key(Key&&) = default;
Key& operator=(Key&&) = default;
};
struct Eq {
bool operator()(const Key&, const Key&) const { return true; }
};
struct Hash {
size_t operator()(const Key&) const { return 0; }
};
absl::node_hash_map<Key, int, Hash, Eq> m;
m[Key()];
}
struct NonMovableKey {
explicit NonMovableKey(int i) : i(i) {}
NonMovableKey(NonMovableKey&&) = delete;
int i;
};
struct NonMovableKeyHash {
using is_transparent = void;
size_t operator()(const NonMovableKey& k) const { return k.i; }
size_t operator()(int k) const { return k; }
};
struct NonMovableKeyEq {
using is_transparent = void;
bool operator()(const NonMovableKey& a, const NonMovableKey& b) const {
return a.i == b.i;
}
bool operator()(const NonMovableKey& a, int b) const { return a.i == b; }
};
TEST(NodeHashMap, MergeExtractInsert) {
absl::node_hash_map<NonMovableKey, int, NonMovableKeyHash, NonMovableKeyEq>
set1, set2;
set1.emplace(std::piecewise_construct, std::make_tuple(7),
std::make_tuple(-7));
set1.emplace(std::piecewise_construct, std::make_tuple(17),
std::make_tuple(-17));
set2.emplace(std::piecewise_construct, std::make_tuple(7),
std::make_tuple(-70));
set2.emplace(std::piecewise_construct, std::make_tuple(19),
std::make_tuple(-190));
auto Elem = [](int key, int value) {
return Pair(Field(&NonMovableKey::i, key), value);
};
EXPECT_THAT(set1, UnorderedElementsAre(Elem(7, -7), Elem(17, -17)));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(19, -190)));
// NonMovableKey is neither copyable nor movable. We should still be able to
// move nodes around.
static_assert(!std::is_move_constructible<NonMovableKey>::value, "");
set1.merge(set2);
EXPECT_THAT(set1,
UnorderedElementsAre(Elem(7, -7), Elem(17, -17), Elem(19, -190)));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
auto node = set1.extract(7);
EXPECT_TRUE(node);
EXPECT_EQ(node.key().i, 7);
EXPECT_EQ(node.mapped(), -7);
EXPECT_THAT(set1, UnorderedElementsAre(Elem(17, -17), Elem(19, -190)));
auto insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_FALSE(insert_result.inserted);
EXPECT_TRUE(insert_result.node);
EXPECT_EQ(insert_result.node.key().i, 7);
EXPECT_EQ(insert_result.node.mapped(), -7);
EXPECT_THAT(*insert_result.position, Elem(7, -70));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
node = set1.extract(17);
EXPECT_TRUE(node);
EXPECT_EQ(node.key().i, 17);
EXPECT_EQ(node.mapped(), -17);
EXPECT_THAT(set1, UnorderedElementsAre(Elem(19, -190)));
node.mapped() = 23;
insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_TRUE(insert_result.inserted);
EXPECT_FALSE(insert_result.node);
EXPECT_THAT(*insert_result.position, Elem(17, 23));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(17, 23)));
}
} // namespace
} // namespace container_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: node_hash_set.h
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_set<T>` is an unordered associative container designed to
// be a more efficient replacement for `std::unordered_set`. Like
// `unordered_set`, search, insertion, and deletion of set elements can be done
// as an `O(1)` operation. However, `node_hash_set` (and other unordered
// associative containers known as the collection of Abseil "Swiss tables")
// contains other optimizations that result in both memory and computation
// advantages.
//
// In most cases, your default choice for a hash table should be a map of type
// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need
// pointer stability, a `node_hash_set` should be your preferred choice. As
// well, if you are migrating your code from using `std::unordered_set`, a
// `node_hash_set` should be an easy migration. Consider migrating to
// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
// upon further review.
#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
#define ABSL_CONTAINER_NODE_HASH_SET_H_
#include <type_traits>
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/node_hash_policy.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
namespace container_internal {
template <typename T>
struct NodeHashSetPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::node_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_set<T>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
//   that the set is supplied with a compatible heterogeneous hashing function
//   and equality operator (see the sketch following this list).
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
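// For illustration, a hedged sketch of heterogeneous lookup (assuming the
// default Abseil hash and equality functors for `std::string`, which are
// transparent over string views):
//
//   absl::node_hash_set<std::string> ducks = {"huey"};
//   // Looks up by `absl::string_view` without constructing a temporary
//   // `std::string`:
//   bool found = ducks.contains(absl::string_view("huey"));
//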
// By default, `node_hash_set` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `node_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Example:
//
// // Create a node hash set of three strings
//   absl::node_hash_set<std::string> ducks =
//     {"huey", "dewey", "louie"};
//
//   // Insert a new element into the node hash set
//   ducks.insert("donald");
//
//   // Force a rehash of the node hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Alloc = std::allocator<T>>
class node_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
using Base = typename node_hash_set::raw_hash_set;
public:
node_hash_set() {}
using Base::Base;
// node_hash_set::begin()
//
// Returns an iterator to the beginning of the `node_hash_set`.
using Base::begin;
// node_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `node_hash_set`.
using Base::cbegin;
// node_hash_set::cend()
//
// Returns a const iterator to the end of the `node_hash_set`.
using Base::cend;
// node_hash_set::end()
//
// Returns an iterator to the end of the `node_hash_set`.
using Base::end;
// node_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `node_hash_set`.
//
// NOTE: this member function is particular to `absl::node_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;
// node_hash_set::empty()
//
// Returns whether or not the `node_hash_set` is empty.
using Base::empty;
// node_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `node_hash_set` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `node_hash_set<T>`.
using Base::max_size;
// node_hash_set::size()
//
// Returns the number of elements currently within the `node_hash_set`.
using Base::size;
// node_hash_set::clear()
//
// Removes all elements from the `node_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;
// node_hash_set::erase()
//
// Erases elements within the `node_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
//   Erases the element at `pos` of the `node_hash_set`, returning
// `void`.
//
//   NOTE: this return behavior is different from that of STL containers in
//   general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
using Base::erase;
// node_hash_set::insert()
//
// Inserts an element of the specified value into the `node_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `node_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `node_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
//   void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `node_hash_set` we guarantee the
// first match is inserted.
//
//   void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `node_hash_set` we guarantee the first match is inserted.
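//
// Example (a minimal sketch with illustrative contents):
//
//   absl::node_hash_set<std::string> ducks;
//   auto result = ducks.insert("huey");  // result.second is true: inserted
//   result = ducks.insert("huey");       // duplicate: result.second is false
//                                        // and result.first is the element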
using Base::insert;
// node_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// node_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// node_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `node_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
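//
// Example (a minimal sketch; `other` is an illustrative second set):
//
//   absl::node_hash_set<std::string> s = {"huey", "dewey"};
//   absl::node_hash_set<std::string> other;
//   auto node = s.extract("huey");             // removes "huey", if present
//   if (node) other.insert(std::move(node));   // moves the node; no copy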
using Base::extract;
// node_hash_set::merge()
//
// Extracts elements from a given `source` node hash set into this
// `node_hash_set`. If the destination `node_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
// node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable; they are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// node_hash_set::rehash(count)
//
// Rehashes the `node_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots would cause the load factor to
// exceed the current maximum load factor (that is, if
// `count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike `flat_hash_set`, a `rehash()` of a `node_hash_set` does not
// invalidate pointers or references to its elements; as with
// `std::unordered_set`, only iterators are invalidated.
using Base::rehash;
// node_hash_set::reserve(count)
//
// Sets the number of slots in the `node_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
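//
// Example (a minimal sketch; the element count is illustrative only):
//
//   absl::node_hash_set<int> s;
//   s.reserve(1000);  // allocates once up front; inserting up to 1000
//                     // elements will not trigger a rehash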
using Base::reserve;
// node_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `node_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;
// node_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `node_hash_set`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `node_hash_set`.
using Base::count;
// node_hash_set::equal_range()
//
// Returns a range, defined by a `std::pair` of two iterators, containing all
// elements with the passed key in the
// `node_hash_set`.
using Base::equal_range;
// node_hash_set::find()
//
// Finds an element with the passed `key` within the `node_hash_set`.
using Base::find;
// node_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_set`. Note that
// because a flat hash map contains all elements within its internal storage,
// this value simply equals the current capacity of the `node_hash_set`.
using Base::bucket_count;
// node_hash_set::load_factor()
//
// Returns the current load factor of the `node_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;
// node_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `node_hash_set`. Overloads are
// listed below.
//
// float node_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `node_hash_set`.
//
// void node_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `node_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `node_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// node_hash_set::get_allocator()
//
// Returns the allocator associated with this `node_hash_set`.
using Base::get_allocator;
// node_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `node_hash_set`.
using Base::hash_function;
// node_hash_set::key_eq()
//
// Returns the function used for comparing key equality.
using Base::key_eq;
ABSL_DEPRECATED("Call `hash_function()` instead.")
typename Base::hasher hash_funct() { return this->hash_function(); }
ABSL_DEPRECATED("Call `rehash()` instead.")
void resize(typename Base::size_type hint) { this->rehash(hint); }
};
namespace container_internal {
template <class T>
struct NodeHashSetPolicy
: absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
template <class Allocator, class... Args>
static T* new_element(Allocator* alloc, Args&&... args) {
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
T* res = absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
absl::allocator_traits<ValueAlloc>::construct(value_alloc, res,
std::forward<Args>(args)...);
return res;
}
template <class Allocator>
static void delete_element(Allocator* alloc, T* elem) {
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
}
template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}
static size_t element_space_used(const T*) { return sizeof(T); }
};
} // namespace container_internal
} // namespace absl
#endif // ABSL_CONTAINER_NODE_HASH_SET_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/node_hash_set.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
namespace absl {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using SetTypes = ::testing::Types<
node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>,
node_hash_set<std::string, StatefulTestingHash, StatefulTestingEqual,
Alloc<int>>,
node_hash_set<Enum, StatefulTestingHash, StatefulTestingEqual, Alloc<Enum>>,
node_hash_set<EnumClass, StatefulTestingHash, StatefulTestingEqual,
Alloc<EnumClass>>>;
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ConstructorTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, LookupTest, SetTypes);
INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ModifiersTest, SetTypes);
TEST(NodeHashSet, MoveableNotCopyableCompiles) {
node_hash_set<std::unique_ptr<void*>> t;
node_hash_set<std::unique_ptr<void*>> u;
u = std::move(t);
}
TEST(NodeHashSet, MergeExtractInsert) {
struct Hash {
size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
};
struct Eq {
bool operator()(const std::unique_ptr<int>& a,
const std::unique_ptr<int>& b) const {
return *a == *b;
}
};
absl::node_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
set1.insert(absl::make_unique<int>(7));
set1.insert(absl::make_unique<int>(17));
set2.insert(absl::make_unique<int>(7));
set2.insert(absl::make_unique<int>(19));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
set1.merge(set2);
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
auto node = set1.extract(absl::make_unique<int>(7));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(7));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
auto insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_FALSE(insert_result.inserted);
EXPECT_TRUE(insert_result.node);
EXPECT_THAT(insert_result.node.value(), Pointee(7));
EXPECT_EQ(**insert_result.position, 7);
EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
node = set1.extract(absl::make_unique<int>(17));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(17));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
node.value() = absl::make_unique<int>(23);
insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_TRUE(insert_result.inserted);
EXPECT_FALSE(insert_result.node);
EXPECT_EQ(**insert_result.position, 23);
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
}
} // namespace
} // namespace container_internal
} // namespace absl
...@@ -117,6 +117,7 @@ MSVC_FLAGS = [
    "/W3",
    "/wd4005",  # macro-redefinition
    "/wd4068",  # unknown pragma
    "/wd4180",  # qualifier applied to function type has no meaning; ignored
    "/wd4244",  # conversion from 'type1' to 'type2', possible loss of data
    "/wd4267",  # conversion from 'size_t' to 'type', possible loss of data
    "/wd4800",  # forcing value to bool 'true' or 'false' (performance warning)
......
#
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
load(
"//absl:copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
cc_library(
name = "hash",
srcs = [
"internal/hash.cc",
"internal/hash.h",
],
hdrs = ["hash.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
":city",
"//absl/base:core_headers",
"//absl/base:endian",
"//absl/container:fixed_array",
"//absl/meta:type_traits",
"//absl/numeric:int128",
"//absl/strings",
"//absl/types:optional",
"//absl/types:variant",
"//absl/utility",
],
)
cc_library(
name = "hash_testing",
testonly = 1,
hdrs = ["hash_testing.h"],
deps = [
":spy_hash_state",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/types:variant",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "hash_test",
srcs = ["hash_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":hash",
":hash_testing",
"//absl/base:core_headers",
"//absl/container:flat_hash_set",
"//absl/hash:spy_hash_state",
"//absl/meta:type_traits",
"//absl/numeric:int128",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "spy_hash_state",
testonly = 1,
hdrs = ["internal/spy_hash_state.h"],
copts = ABSL_DEFAULT_COPTS,
visibility = ["//visibility:private"],
deps = [
":hash",
"//absl/strings",
"//absl/strings:str_format",
],
)
cc_library(
name = "city",
srcs = ["internal/city.cc"],
hdrs = [
"internal/city.h",
"internal/city_crc.h",
],
copts = ABSL_DEFAULT_COPTS,
deps = [
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
],
)
cc_test(
name = "city_test",
srcs = ["internal/city_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":city",
"@com_google_googletest//:gtest_main",
],
)
#
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
list(APPEND HASH_PUBLIC_HEADERS
"hash.h"
)
list(APPEND HASH_INTERNAL_HEADERS
"internal/city.h"
"internal/city_crc.h"
"internal/hash.h"
)
# absl_hash library
list(APPEND HASH_SRC
"internal/city.cc"
"internal/hash.cc"
${HASH_PUBLIC_HEADERS}
${HASH_INTERNAL_HEADERS}
)
set(HASH_PUBLIC_LIBRARIES absl::hash absl::container absl::strings absl::str_format absl::utility)
absl_library(
TARGET
absl_hash
SOURCES
${HASH_SRC}
PUBLIC_LIBRARIES
${HASH_PUBLIC_LIBRARIES}
EXPORT_NAME
hash
)
#
## TESTS
#
# testing support
set(HASH_TEST_HEADERS hash_testing.h internal/spy_hash_state.h)
set(HASH_TEST_PUBLIC_LIBRARIES absl::hash absl::container absl::numeric absl::strings absl::str_format)
# hash_test
set(HASH_TEST_SRC "hash_test.cc" ${HASH_TEST_HEADERS})
absl_test(
TARGET
hash_test
SOURCES
${HASH_TEST_SRC}
PUBLIC_LIBRARIES
${HASH_TEST_PUBLIC_LIBRARIES}
)
# city_test
set(CITY_TEST_SRC "internal/city_test.cc")
absl_test(
TARGET
city_test
SOURCES
${CITY_TEST_SRC}
PUBLIC_LIBRARIES
${HASH_TEST_PUBLIC_LIBRARIES}
)
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: hash.h
// -----------------------------------------------------------------------------
//
// This header file defines the Abseil `hash` library and the Abseil hashing
// framework. This framework consists of the following:
//
// * The `absl::Hash` functor, which is used to invoke the hasher within the
// Abseil hashing framework. `absl::Hash<T>` supports most basic types and
// a number of Abseil types out of the box.
// * `AbslHashValue`, an extension point that allows you to extend types to
// support Abseil hashing without requiring you to define a hashing
// algorithm.
// * `HashState`, a type-erased class which implements the manipulation of the
//   hash state (H) itself, containing member functions `combine()` and
// `combine_contiguous()`, which you can use to contribute to an existing
// hash state when hashing your types.
//
// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
// provides most of its utility by abstracting away the hash algorithm (and its
// implementation) entirely. Instead, a type invokes the Abseil hashing
// framework by simply combining its state with the state of known, hashable
// types. Hashing of that combined state is separately done by `absl::Hash`.
//
// Example:
//
// // Suppose we have a class `Circle` for which we want to add hashing
// class Circle {
// public:
// ...
// private:
// std::pair<int, int> center_;
// int radius_;
// };
//
// // To add hashing support to `Circle`, we simply need to add an ordinary
// // function `AbslHashValue()`, and return the combined hash state of the
// // existing hash state and the class state:
//
// template <typename H>
// friend H AbslHashValue(H h, const Circle& c) {
// return H::combine(std::move(h), c.center_, c.radius_);
// }
//
// For more information, see Adding Type Support to `absl::Hash` below.
//
#ifndef ABSL_HASH_HASH_H_
#define ABSL_HASH_HASH_H_
#include "absl/hash/internal/hash.h"
namespace absl {
// -----------------------------------------------------------------------------
// `absl::Hash`
// -----------------------------------------------------------------------------
//
// `absl::Hash<T>` is a convenient general-purpose hash functor for a type `T`
// satisfying any of the following conditions (in order):
//
// * T is an arithmetic or pointer type
// * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary
// hash state `H`.
// * T defines a specialization of `HASH_NAMESPACE::hash<T>`
// * T defines a specialization of `std::hash<T>`
//
// `absl::Hash` intrinsically supports the following types:
//
// * All integral types (including bool)
// * All enum types
// * All floating-point types (although hashing them is discouraged)
// * All pointer types, including nullptr_t
// * std::pair<T1, T2>, if T1 and T2 are hashable
// * std::tuple<Ts...>, if all the Ts... are hashable
// * std::unique_ptr and std::shared_ptr
// * All string-like types including:
// * std::string
// * std::string_view (as well as any instance of std::basic_string that
// uses char and std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
// * All the standard ordered associative containers (provided the elements are
// hashable)
// * absl types such as the following:
// * absl::string_view
// * absl::InlinedVector
// * absl::FixedArray
//   * absl::uint128
// * absl::Time, absl::Duration, and absl::TimeZone
//
// Note: the list above is not meant to be exhaustive. Additional type support
// may be added, in which case the above list will be updated.
//
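// For illustration, a minimal sketch of invoking the functor directly (the
// values are illustrative only):
//
//   absl::Hash<std::pair<int, std::string>> hasher;
//   size_t h = hasher(std::make_pair(1, std::string("one")));
//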
// -----------------------------------------------------------------------------
// absl::Hash Invocation Evaluation
// -----------------------------------------------------------------------------
//
// When invoked, `absl::Hash<T>` searches for supplied hash functions in the
// following order:
//
// * Natively supported types out of the box (see above)
// * Types for which an `AbslHashValue()` overload is provided (such as
// user-defined types). See "Adding Type Support to `absl::Hash`" below.
// * Types which define a `HASH_NAMESPACE::hash<T>` specialization (aka
// `__gnu_cxx::hash<T>` for gcc/Clang or `stdext::hash<T>` for MSVC)
// * Types which define a `std::hash<T>` specialization
//
// The fallback to legacy hash functions exists mainly for backwards
// compatibility. If you have a choice, prefer defining an `AbslHashValue`
// overload instead of specializing any legacy hash functors.
//
// -----------------------------------------------------------------------------
// The Hash State Concept, and using `HashState` for Type Erasure
// -----------------------------------------------------------------------------
//
// The `absl::Hash` framework relies on the Concept of a "hash state." Such a
// hash state is used in several places:
//
// * Within existing implementations of `absl::Hash<T>` to store the hashed
// state of an object. Note that it is up to the implementation how it stores
// such state. A hash table, for example, may mix the state to produce an
// integer value; a testing framework may simply hold a vector of that state.
// * Within implementations of `AbslHashValue()` used to extend user-defined
// types. (See "Adding Type Support to absl::Hash" below.)
// * Inside a `HashState`, providing type erasure for the concept of a hash
// state, which you can use to extend the `absl::Hash` framework for types
// that are otherwise difficult to extend using `AbslHashValue()`. (See the
// `HashState` class below.)
//
// The "hash state" concept contains two member functions for mixing hash state:
//
// * `H::combine()`
//
// Combines an arbitrary number of values into a hash state, returning the
// updated state. Note that the existing hash state is move-only and must be
// passed by value.
//
// Each of the value types T must be hashable by H.
//
// NOTE:
//
// state = H::combine(std::move(state), value1, value2, value3);
//
// must be guaranteed to produce the same hash expansion as
//
// state = H::combine(std::move(state), value1);
// state = H::combine(std::move(state), value2);
// state = H::combine(std::move(state), value3);
//
// * `H::combine_contiguous()`
//
// Combines a contiguous array of `size` elements into a hash state,
// returning the updated state. Note that the existing hash state is
// move-only and must be passed by value.
//
// NOTE:
//
// state = H::combine_contiguous(std::move(state), data, size);
//
// need NOT be guaranteed to produce the same hash expansion as a loop
// (it may perform internal optimizations). If you need this guarantee, use a
// loop instead.
//
// -----------------------------------------------------------------------------
// Adding Type Support to `absl::Hash`
// -----------------------------------------------------------------------------
//
// To add support for your user-defined type, add a proper `AbslHashValue()`
// overload as a free (non-member) function. The overload will take an
// existing hash state and should combine that state with state from the type.
//
// Example:
//
// template <typename H>
// H AbslHashValue(H state, const MyType& v) {
// return H::combine(std::move(state), v.field1, ..., v.fieldN);
// }
//
// where `(field1, ..., fieldN)` are the members you would use on your
// `operator==` to define equality.
//
// Notice that `AbslHashValue` is not a class member, but an ordinary function.
// An `AbslHashValue` overload for a type should only be declared in the same
// file and namespace as said type. The proper `AbslHashValue` implementation
// for a given type will be discovered via ADL.
//
// Note: unlike `std::hash`, `absl::Hash` should never be specialized. It must
// only be extended by adding `AbslHashValue()` overloads.
//
template <typename T>
using Hash = absl::hash_internal::Hash<T>;
// HashState
//
// A type erased version of the hash state concept, for use in user-defined
// `AbslHashValue` implementations that can't use templates (such as PImpl
// classes, virtual functions, etc.). The type erasure adds overhead so it
// should be avoided unless necessary.
//
// Note: This wrapper will only erase calls to:
// combine_contiguous(H, const unsigned char*, size_t)
//
// All other calls will be handled internally and will not invoke overloads
// provided by the wrapped class.
//
// Users of this class should still define a template `AbslHashValue` function,
// but can use `absl::HashState::Create(&state)` to erase the type of the hash
// state and dispatch to their private hashing logic.
//
// This state can be used like any other hash state. In particular, you can call
// `HashState::combine()` and `HashState::combine_contiguous()` on it.
//
// Example:
//
// class Interface {
// public:
// template <typename H>
// friend H AbslHashValue(H state, const Interface& value) {
//     state = H::combine(std::move(state), std::type_index(typeid(value)));
// value.HashValue(absl::HashState::Create(&state));
// return state;
// }
// private:
// virtual void HashValue(absl::HashState state) const = 0;
// };
//
// class Impl : Interface {
// private:
// void HashValue(absl::HashState state) const override {
// absl::HashState::combine(std::move(state), v1_, v2_);
// }
// int v1_;
// string v2_;
// };
class HashState : public hash_internal::HashStateBase<HashState> {
public:
// HashState::Create()
//
// Create a new `HashState` instance that wraps `state`. All calls to
// `combine()` and `combine_contiguous()` on the new instance will be
// redirected to the original `state` object. The `state` object must outlive
// the `HashState` instance.
template <typename T>
static HashState Create(T* state) {
HashState s;
s.Init(state);
return s;
}
HashState(const HashState&) = delete;
HashState& operator=(const HashState&) = delete;
HashState(HashState&&) = default;
HashState& operator=(HashState&&) = default;
// HashState::combine()
//
// Combines an arbitrary number of values into a hash state, returning the
// updated state.
using HashState::HashStateBase::combine;
// HashState::combine_contiguous()
//
// Combines a contiguous array of `size` elements into a hash state, returning
// the updated state.
static HashState combine_contiguous(HashState hash_state,
const unsigned char* first, size_t size) {
hash_state.combine_contiguous_(hash_state.state_, first, size);
return hash_state;
}
using HashState::HashStateBase::combine_contiguous;
private:
HashState() = default;
template <typename T>
static void CombineContiguousImpl(void* p, const unsigned char* first,
size_t size) {
T& state = *static_cast<T*>(p);
state = T::combine_contiguous(std::move(state), first, size);
}
template <typename T>
void Init(T* state) {
state_ = state;
combine_contiguous_ = &CombineContiguousImpl<T>;
}
// Do not erase an already erased state.
void Init(HashState* state) {
state_ = state->state_;
combine_contiguous_ = state->combine_contiguous_;
}
void* state_;
void (*combine_contiguous_)(void*, const unsigned char*, size_t);
};
} // namespace absl
#endif // ABSL_HASH_HASH_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/hash/hash.h"
#include <array>
#include <cstring>
#include <deque>
#include <forward_list>
#include <functional>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash_testing.h"
#include "absl/hash/internal/spy_hash_state.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
namespace {
using absl::Hash;
using absl::hash_internal::SpyHashState;
template <typename T>
class HashValueIntTest : public testing::Test {
};
TYPED_TEST_CASE_P(HashValueIntTest);
template <typename T>
SpyHashState SpyHash(const T& value) {
return SpyHashState::combine(SpyHashState(), value);
}
// Helper trait to verify if T is hashable. We use absl::Hash's poison status to
// detect it.
template <typename T>
using is_hashable = std::is_default_constructible<absl::Hash<T>>;
TYPED_TEST_P(HashValueIntTest, BasicUsage) {
EXPECT_TRUE((is_hashable<TypeParam>::value));
TypeParam n = 42;
EXPECT_EQ(SpyHash(n), SpyHash(TypeParam{42}));
EXPECT_NE(SpyHash(n), SpyHash(TypeParam{0}));
EXPECT_NE(SpyHash(std::numeric_limits<TypeParam>::max()),
SpyHash(std::numeric_limits<TypeParam>::min()));
}
TYPED_TEST_P(HashValueIntTest, FastPath) {
// Test the fast-path to make sure the values are the same.
TypeParam n = 42;
EXPECT_EQ(absl::Hash<TypeParam>{}(n),
absl::Hash<std::tuple<TypeParam>>{}(std::tuple<TypeParam>(n)));
}
REGISTER_TYPED_TEST_CASE_P(HashValueIntTest, BasicUsage, FastPath);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t, uint32_t,
uint64_t, size_t>;
INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueIntTest, IntTypes);
template <typename T, typename = void>
struct IsHashCallable : std::false_type {};
template <typename T>
struct IsHashCallable<T, absl::void_t<decltype(std::declval<absl::Hash<T>>()(
    std::declval<const T&>()))>> : std::true_type {};
template <typename T, typename = void>
struct IsAggregateInitializable : std::false_type {};
template <typename T>
struct IsAggregateInitializable<T, absl::void_t<decltype(T{})>>
: std::true_type {};
TEST(IsHashableTest, ValidHash) {
EXPECT_TRUE((is_hashable<int>::value));
EXPECT_TRUE(std::is_default_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(std::is_copy_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(std::is_move_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(absl::is_copy_assignable<absl::Hash<int>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::Hash<int>>::value);
EXPECT_TRUE(IsHashCallable<int>::value);
EXPECT_TRUE(IsAggregateInitializable<absl::Hash<int>>::value);
}
#if ABSL_HASH_INTERNAL_CAN_POISON_ && !defined(__APPLE__)
TEST(IsHashableTest, PoisonHash) {
struct X {};
EXPECT_FALSE((is_hashable<X>::value));
EXPECT_FALSE(std::is_default_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(std::is_copy_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(std::is_move_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(absl::is_copy_assignable<absl::Hash<X>>::value);
EXPECT_FALSE(absl::is_move_assignable<absl::Hash<X>>::value);
EXPECT_FALSE(IsHashCallable<X>::value);
EXPECT_FALSE(IsAggregateInitializable<absl::Hash<X>>::value);
}
#endif // ABSL_HASH_INTERNAL_CAN_POISON_
// Hashable types
//
// These types exist simply to exercise various AbslHashValue behaviors, so
// they are named by what their AbslHashValue overload does.
struct NoOp {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, NoOp n) {
return std::move(h);
}
};
struct EmptyCombine {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, EmptyCombine e) {
return HashCode::combine(std::move(h));
}
};
template <typename Int>
struct CombineIterative {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, CombineIterative c) {
for (int i = 0; i < 5; ++i) {
h = HashCode::combine(std::move(h), Int(i));
}
return h;
}
};
template <typename Int>
struct CombineVariadic {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, CombineVariadic c) {
return HashCode::combine(std::move(h), Int(0), Int(1), Int(2), Int(3),
Int(4));
}
};
using InvokeTag = absl::hash_internal::InvokeHashTag;
template <InvokeTag T>
using InvokeTagConstant = std::integral_constant<InvokeTag, T>;
template <InvokeTag... Tags>
struct MinTag;
template <InvokeTag a, InvokeTag b, InvokeTag... Tags>
struct MinTag<a, b, Tags...> : MinTag<(a < b ? a : b), Tags...> {};
template <InvokeTag a>
struct MinTag<a> : InvokeTagConstant<a> {};
template <InvokeTag... Tags>
struct CustomHashType {
size_t value;
};
template <InvokeTag allowed, InvokeTag... tags>
struct EnableIfContained
: std::enable_if<absl::disjunction<
std::integral_constant<bool, allowed == tags>...>::value> {};
template <
typename H, InvokeTag... Tags,
typename = typename EnableIfContained<InvokeTag::kHashValue, Tags...>::type>
H AbslHashValue(H state, CustomHashType<Tags...> t) {
static_assert(MinTag<Tags...>::value == InvokeTag::kHashValue, "");
return H::combine(std::move(state),
t.value + static_cast<int>(InvokeTag::kHashValue));
}
} // namespace
namespace absl {
namespace hash_internal {
template <InvokeTag... Tags>
struct is_uniquely_represented<
CustomHashType<Tags...>,
typename EnableIfContained<InvokeTag::kUniquelyRepresented, Tags...>::type>
: std::true_type {};
} // namespace hash_internal
} // namespace absl
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
namespace ABSL_INTERNAL_LEGACY_HASH_NAMESPACE {
template <InvokeTag... Tags>
struct hash<CustomHashType<Tags...>> {
template <InvokeTag... TagsIn, typename = typename EnableIfContained<
InvokeTag::kLegacyHash, TagsIn...>::type>
size_t operator()(CustomHashType<TagsIn...> t) const {
static_assert(MinTag<Tags...>::value == InvokeTag::kLegacyHash, "");
return t.value + static_cast<int>(InvokeTag::kLegacyHash);
}
};
} // namespace ABSL_INTERNAL_LEGACY_HASH_NAMESPACE
#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
namespace std {
template <InvokeTag... Tags> // NOLINT
struct hash<CustomHashType<Tags...>> {
template <InvokeTag... TagsIn, typename = typename EnableIfContained<
InvokeTag::kStdHash, TagsIn...>::type>
size_t operator()(CustomHashType<TagsIn...> t) const {
static_assert(MinTag<Tags...>::value == InvokeTag::kStdHash, "");
return t.value + static_cast<int>(InvokeTag::kStdHash);
}
};
} // namespace std
namespace {
template <typename... T>
void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>, T...) {
using type = CustomHashType<T::value...>;
SCOPED_TRACE(testing::PrintToString(std::vector<InvokeTag>{T::value...}));
EXPECT_TRUE(is_hashable<type>());
EXPECT_TRUE(is_hashable<const type>());
EXPECT_TRUE(is_hashable<const type&>());
const size_t offset = static_cast<int>(std::min({T::value...}));
EXPECT_EQ(SpyHash(type{7}), SpyHash(size_t{7 + offset}));
}
void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>) {
#if ABSL_HASH_INTERNAL_CAN_POISON_
// is_hashable is false if we don't support any of the hooks.
using type = CustomHashType<>;
EXPECT_FALSE(is_hashable<type>());
EXPECT_FALSE(is_hashable<const type>());
EXPECT_FALSE(is_hashable<const type&>());
#endif // ABSL_HASH_INTERNAL_CAN_POISON_
}
template <InvokeTag Tag, typename... T>
void TestCustomHashType(InvokeTagConstant<Tag> tag, T... t) {
constexpr auto next = static_cast<InvokeTag>(static_cast<int>(Tag) + 1);
TestCustomHashType(InvokeTagConstant<next>(), tag, t...);
TestCustomHashType(InvokeTagConstant<next>(), t...);
}
TEST(HashTest, CustomHashType) {
TestCustomHashType(InvokeTagConstant<InvokeTag{}>());
}
TEST(HashTest, NoOpsAreEquivalent) {
EXPECT_EQ(Hash<NoOp>()({}), Hash<NoOp>()({}));
EXPECT_EQ(Hash<NoOp>()({}), Hash<EmptyCombine>()({}));
}
template <typename T>
class HashIntTest : public testing::Test {
};
TYPED_TEST_CASE_P(HashIntTest);
TYPED_TEST_P(HashIntTest, BasicUsage) {
EXPECT_NE(Hash<NoOp>()({}), Hash<TypeParam>()(0));
EXPECT_NE(Hash<NoOp>()({}),
Hash<TypeParam>()(std::numeric_limits<TypeParam>::max()));
if (std::numeric_limits<TypeParam>::min() != 0) {
EXPECT_NE(Hash<NoOp>()({}),
Hash<TypeParam>()(std::numeric_limits<TypeParam>::min()));
}
EXPECT_EQ(Hash<CombineIterative<TypeParam>>()({}),
Hash<CombineVariadic<TypeParam>>()({}));
}
REGISTER_TYPED_TEST_CASE_P(HashIntTest, BasicUsage);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
                                uint32_t, uint64_t, size_t>;
INSTANTIATE_TYPED_TEST_CASE_P(My, HashIntTest, IntTypes);
struct StructWithPadding {
char c;
int i;
template <typename H>
friend H AbslHashValue(H hash_state, const StructWithPadding& s) {
return H::combine(std::move(hash_state), s.c, s.i);
}
};
static_assert(sizeof(StructWithPadding) > sizeof(char) + sizeof(int),
"StructWithPadding doesn't have padding");
static_assert(std::is_standard_layout<StructWithPadding>::value, "");
// This check has to be disabled because libstdc++ doesn't support it.
// static_assert(std::is_trivially_constructible<StructWithPadding>::value, "");
template <typename T>
struct ArraySlice {
T* begin;
T* end;
template <typename H>
friend H AbslHashValue(H hash_state, const ArraySlice& slice) {
for (auto t = slice.begin; t != slice.end; ++t) {
hash_state = H::combine(std::move(hash_state), *t);
}
return hash_state;
}
};
TEST(HashTest, HashNonUniquelyRepresentedType) {
// Create equal StructWithPadding objects that are known to have non-equal
// padding bytes.
static const size_t kNumStructs = 10;
unsigned char buffer1[kNumStructs * sizeof(StructWithPadding)];
std::memset(buffer1, 0, sizeof(buffer1));
auto* s1 = reinterpret_cast<StructWithPadding*>(buffer1);
unsigned char buffer2[kNumStructs * sizeof(StructWithPadding)];
std::memset(buffer2, 255, sizeof(buffer2));
auto* s2 = reinterpret_cast<StructWithPadding*>(buffer2);
  for (size_t i = 0; i < kNumStructs; ++i) {
    SCOPED_TRACE(i);
    s1[i].c = s2[i].c = static_cast<char>('0' + i);
s1[i].i = s2[i].i = i;
ASSERT_FALSE(memcmp(buffer1 + i * sizeof(StructWithPadding),
buffer2 + i * sizeof(StructWithPadding),
sizeof(StructWithPadding)) == 0)
<< "Bug in test code: objects do not have unequal"
<< " object representations";
}
EXPECT_EQ(Hash<StructWithPadding>()(s1[0]), Hash<StructWithPadding>()(s2[0]));
EXPECT_EQ(Hash<ArraySlice<StructWithPadding>>()({s1, s1 + kNumStructs}),
Hash<ArraySlice<StructWithPadding>>()({s2, s2 + kNumStructs}));
}
TEST(HashTest, StandardHashContainerUsage) {
  std::unordered_map<int, std::string, Hash<int>> map = {{0, "foo"},
                                                         {42, "bar"}};
EXPECT_NE(map.find(0), map.end());
EXPECT_EQ(map.find(1), map.end());
EXPECT_NE(map.find(0u), map.end());
}
struct ConvertibleFromNoOp {
ConvertibleFromNoOp(NoOp) {} // NOLINT(runtime/explicit)
template <typename H>
friend H AbslHashValue(H hash_state, ConvertibleFromNoOp) {
return H::combine(std::move(hash_state), 1);
}
};
TEST(HashTest, HeterogeneousCall) {
EXPECT_NE(Hash<ConvertibleFromNoOp>()(NoOp()),
Hash<NoOp>()(NoOp()));
}
TEST(IsUniquelyRepresentedTest, SanityTest) {
using absl::hash_internal::is_uniquely_represented;
EXPECT_TRUE(is_uniquely_represented<unsigned char>::value);
EXPECT_TRUE(is_uniquely_represented<int>::value);
EXPECT_FALSE(is_uniquely_represented<bool>::value);
EXPECT_FALSE(is_uniquely_represented<int*>::value);
}
struct IntAndString {
int i;
std::string s;
template <typename H>
friend H AbslHashValue(H hash_state, IntAndString int_and_string) {
return H::combine(std::move(hash_state), int_and_string.s,
int_and_string.i);
}
};
TEST(HashTest, SmallValueOn64ByteBoundary) {
Hash<IntAndString>()(IntAndString{0, std::string(63, '0')});
}
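// TypeErased exercises hashing through the type-erased absl::HashState:
// AbslHashValue wraps the concrete state and forwards to a non-template
// HashValue() method.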
struct TypeErased {
size_t n;
template <typename H>
friend H AbslHashValue(H hash_state, const TypeErased& v) {
v.HashValue(absl::HashState::Create(&hash_state));
return hash_state;
}
void HashValue(absl::HashState state) const {
absl::HashState::combine(std::move(state), n);
}
};
TEST(HashTest, TypeErased) {
EXPECT_TRUE((is_hashable<TypeErased>::value));
EXPECT_TRUE((is_hashable<std::pair<TypeErased, int>>::value));
EXPECT_EQ(SpyHash(TypeErased{7}), SpyHash(size_t{7}));
EXPECT_NE(SpyHash(TypeErased{7}), SpyHash(size_t{13}));
EXPECT_EQ(SpyHash(std::make_pair(TypeErased{7}, 17)),
SpyHash(std::make_pair(size_t{7}, 17)));
}
} // namespace
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_HASH_HASH_TESTING_H_
#define ABSL_HASH_HASH_TESTING_H_
#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/hash/internal/spy_hash_state.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
namespace absl {
// Run the absl::Hash algorithm over all the elements passed in and verify that
// their hash expansion is congruent with their `==` operator.
//
// It is used in conjunction with EXPECT_TRUE. Failures will output information
// on what requirement failed and on which objects.
//
// Users should pass a collection of values as either an initializer list or
// a container of cases.
//
// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
// {v1, v2, ..., vN}));
//
// std::vector<MyType> cases;
// // Fill cases...
// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
//
// Users can pass a variety of types for testing heterogeneous lookup with
// `std::make_tuple`:
//
// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
// std::make_tuple(v1, v2, ..., vN)));
//
//
// Ideally, the values passed should provide enough coverage of the `==`
// operator and the AbslHashValue implementations.
// For dynamically sized types, the empty state should usually be included in
// the values.
//
// The function accepts an optional comparator function, in case `==` is not
// sufficient for the values provided.
//
// Usage:
//
// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
// std::make_tuple(v1, v2, ..., vN), MyCustomEq{}));
//
// It checks the following requirements:
// 1. The expansion for a value is deterministic.
// 2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates
// to true, then their hash expansion must be equal.
// 3. If `a == b` evaluates to false, their hash expansions must be unequal.
// 4. If `a == b` evaluates to false, neither hash expansion can be a
//    suffix of the other.
// 5. AbslHashValue overloads should not be called by the user. They are only
// meant to be called by the framework. Users should call H::combine() and
// H::combine_contiguous().
// 6. No moved-from instance of the hash state is used in the implementation
// of AbslHashValue.
//
// The values do not have to have the same type. This can be useful for
// equivalent types that support heterogeneous lookup.
//
// A possible reason for breaking (2) is combining state in the hash expansion
// that was not used in `==`.
// For example:
//
// struct Bad2 {
// int a, b;
// template <typename H>
// friend H AbslHashValue(H state, Bad2 x) {
// // Uses a and b.
//     return H::combine(std::move(state), x.a, x.b);
// }
// friend bool operator==(Bad2 x, Bad2 y) {
// // Only uses a.
// return x.a == y.a;
// }
// };
//
// As for (3), breaking this usually means that there is state being passed to
// the `==` operator that is not used in the hash expansion.
// For example:
//
// struct Bad3 {
// int a, b;
// template <typename H>
// friend H AbslHashValue(H state, Bad3 x) {
// // Only uses a.
//     return H::combine(std::move(state), x.a);
// }
// friend bool operator==(Bad3 x, Bad3 y) {
// // Uses a and b.
// return x.a == y.a && x.b == y.b;
// }
// };
//
// Finally, a common way to break (4) is by combining dynamic ranges without
// combining the size of the range.
// For example:
//
// struct Bad4 {
// int *p, size;
// template <typename H>
// friend H AbslHashValue(H state, Bad4 x) {
//     return H::combine_contiguous(std::move(state), x.p, x.size);
// }
// friend bool operator==(Bad4 x, Bad4 y) {
// return std::equal(x.p, x.p + x.size, y.p, y.p + y.size);
// }
// };
//
// An easy solution to this is to combine the size after combining the range,
// like so:
// template <typename H>
// friend H AbslHashValue(H state, Bad4 x) {
//     return H::combine(
//         H::combine_contiguous(std::move(state), x.p, x.size), x.size);
// }
//
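// Note: the unnamed `int&...` packs below act as an explicit-argument
// barrier: they cannot be deduced or (practically) specified, so callers
// always let the remaining template parameters be deduced.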
template <int&... ExplicitBarrier, typename Container>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(const Container& values);
template <int&... ExplicitBarrier, typename Container, typename Eq>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals);
template <int&..., typename T>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values);
template <int&..., typename T, typename Eq>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
Eq equals);
namespace hash_internal {
struct PrintVisitor {
size_t index;
template <typename T>
std::string operator()(const T* value) const {
return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")");
}
};
template <typename Eq>
struct EqVisitor {
Eq eq;
template <typename T, typename U>
bool operator()(const T* t, const U* u) const {
return eq(*t, *u);
}
};
struct ExpandVisitor {
template <typename T>
SpyHashState operator()(const T* value) const {
return SpyHashState::combine(SpyHashState(), *value);
}
};
template <typename Container, typename Eq>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
using V = typename Container::value_type;
struct Info {
const V& value;
size_t index;
    std::string ToString() const {
      return absl::visit(PrintVisitor{index}, value);
    }
SpyHashState expand() const { return absl::visit(ExpandVisitor{}, value); }
};
using EqClass = std::vector<Info>;
std::vector<EqClass> classes;
// Gather the values in equivalence classes.
size_t i = 0;
for (const auto& value : values) {
EqClass* c = nullptr;
for (auto& eqclass : classes) {
if (absl::visit(EqVisitor<Eq>{equals}, value, eqclass[0].value)) {
c = &eqclass;
break;
}
}
if (c == nullptr) {
classes.emplace_back();
c = &classes.back();
}
c->push_back({value, i});
++i;
// Verify potential errors captured by SpyHashState.
if (auto error = c->back().expand().error()) {
return testing::AssertionFailure() << *error;
}
}
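  // Requirements (3) and (4) compare values across equivalence classes, so
  // the check below insists on there being at least two of them.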
if (classes.size() < 2) {
return testing::AssertionFailure()
<< "At least two equivalence classes are expected.";
}
// We assume that equality is correctly implemented.
// Now we verify that AbslHashValue is also correctly implemented.
for (const auto& c : classes) {
// All elements of the equivalence class must have the same hash expansion.
const SpyHashState expected = c[0].expand();
for (const Info& v : c) {
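      // expand() is intentionally called twice: if two expansions of the
      // same value differ, the expansion is non-deterministic, violating
      // requirement (1).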
if (v.expand() != v.expand()) {
return testing::AssertionFailure()
<< "Hash expansion for " << v.ToString()
<< " is non-deterministic.";
}
if (v.expand() != expected) {
return testing::AssertionFailure()
<< "Values " << c[0].ToString() << " and " << v.ToString()
<< " evaluate as equal but have an unequal hash expansion.";
}
}
// Elements from other classes must have different hash expansion.
for (const auto& c2 : classes) {
if (&c == &c2) continue;
const SpyHashState c2_hash = c2[0].expand();
switch (SpyHashState::Compare(expected, c2_hash)) {
case SpyHashState::CompareResult::kEqual:
return testing::AssertionFailure()
<< "Values " << c[0].ToString() << " and " << c2[0].ToString()
<< " evaluate as unequal but have an equal hash expansion.";
case SpyHashState::CompareResult::kBSuffixA:
return testing::AssertionFailure()
<< "Hash expansion of " << c2[0].ToString()
<< " is a suffix of the hash expansion of " << c[0].ToString()
<< ".";
case SpyHashState::CompareResult::kASuffixB:
return testing::AssertionFailure()
<< "Hash expansion of " << c[0].ToString()
<< " is a suffix of the hash expansion of " << c2[0].ToString()
<< ".";
case SpyHashState::CompareResult::kUnequal:
break;
}
}
}
return testing::AssertionSuccess();
}
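// TypeSet is a compile-time set of unique types: Insert<U> adds U only if it
// is not already present, and `apply` instantiates a template (e.g.
// absl::variant) with the accumulated types.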
template <typename... T>
struct TypeSet {
template <typename U, bool = disjunction<std::is_same<T, U>...>::value>
struct Insert {
using type = TypeSet<U, T...>;
};
template <typename U>
struct Insert<U, true> {
using type = TypeSet;
};
template <template <typename...> class C>
using apply = C<T...>;
};
template <typename... T>
struct MakeTypeSet : TypeSet<> {};
template <typename T, typename... Ts>
struct MakeTypeSet<T, Ts...> : MakeTypeSet<Ts...>::template Insert<T>::type {};
template <typename... T>
using VariantForTypes = typename MakeTypeSet<
const typename std::decay<T>::type*...>::template apply<absl::variant>;
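// ContainerAsVector normalizes the input (a homogeneous container or a tuple
// of heterogeneous values) into a vector of pointer variants, so the
// verification logic above can treat both cases uniformly.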
template <typename Container>
struct ContainerAsVector {
using V = absl::variant<const typename Container::value_type*>;
using Out = std::vector<V>;
static Out Do(const Container& values) {
Out out;
for (const auto& v : values) out.push_back(&v);
return out;
}
};
template <typename... T>
struct ContainerAsVector<std::tuple<T...>> {
using V = VariantForTypes<T...>;
using Out = std::vector<V>;
template <size_t... I>
static Out DoImpl(const std::tuple<T...>& tuple, absl::index_sequence<I...>) {
return Out{&std::get<I>(tuple)...};
}
static Out Do(const std::tuple<T...>& values) {
return DoImpl(values, absl::index_sequence_for<T...>());
}
};
template <>
struct ContainerAsVector<std::tuple<>> {
static std::vector<VariantForTypes<int>> Do(std::tuple<>) { return {}; }
};
struct DefaultEquals {
template <typename T, typename U>
bool operator()(const T& t, const U& u) const {
return t == u;
}
};
} // namespace hash_internal
template <int&..., typename Container>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(const Container& values) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<Container>::Do(values),
hash_internal::DefaultEquals{});
}
template <int&..., typename Container, typename Eq>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<Container>::Do(values),
equals);
}
template <int&..., typename T>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
hash_internal::DefaultEquals{});
}
template <int&..., typename T, typename Eq>
ABSL_MUST_USE_RESULT testing::AssertionResult
VerifyTypeImplementsAbslHashCorrectly(std::initializer_list<T> values,
Eq equals) {
return hash_internal::VerifyTypeImplementsAbslHashCorrectly(
hash_internal::ContainerAsVector<std::initializer_list<T>>::Do(values),
equals);
}
} // namespace absl
#endif // ABSL_HASH_HASH_TESTING_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file provides CityHash64() and related functions.
//
// It's probably possible to create even faster hash functions by
// writing a program that systematically explores some of the space of
// possible hash functions, by using SIMD instructions, or by
// compromising on hash quality.
#include "absl/hash/internal/city.h"
#include <string.h> // for memcpy and memset
#include <algorithm>
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"
namespace absl {
namespace hash_internal {
#ifdef ABSL_IS_BIG_ENDIAN
#define uint32_in_expected_order(x) (absl::gbswap_32(x))
#define uint64_in_expected_order(x) (absl::gbswap_64(x))
#else
#define uint32_in_expected_order(x) (x)
#define uint64_in_expected_order(x) (x)
#endif
static uint64_t Fetch64(const char *p) {
return uint64_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
static uint32_t Fetch32(const char *p) {
return uint32_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
// Magic numbers for 32-bit hashing. Copied from Murmur3.
static const uint32_t c1 = 0xcc9e2d51;
static const uint32_t c2 = 0x1b873593;
// A 32-bit to 32-bit integer hash copied from Murmur3.
static uint32_t fmix(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
static uint32_t Rotate32(uint32_t val, int shift) {
// Avoid shifting by 32: doing so yields an undefined result.
return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
}
#undef PERMUTE3
#define PERMUTE3(a, b, c) \
do { \
std::swap(a, b); \
std::swap(a, c); \
} while (0)
static uint32_t Mur(uint32_t a, uint32_t h) {
// Helper from Murmur3 for combining two 32-bit values.
a *= c1;
a = Rotate32(a, 17);
a *= c2;
h ^= a;
h = Rotate32(h, 19);
return h * 5 + 0xe6546b64;
}
static uint32_t Hash32Len13to24(const char *s, size_t len) {
uint32_t a = Fetch32(s - 4 + (len >> 1));
uint32_t b = Fetch32(s + 4);
uint32_t c = Fetch32(s + len - 8);
uint32_t d = Fetch32(s + (len >> 1));
uint32_t e = Fetch32(s);
uint32_t f = Fetch32(s + len - 4);
uint32_t h = len;
return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
}
static uint32_t Hash32Len0to4(const char *s, size_t len) {
uint32_t b = 0;
uint32_t c = 9;
for (size_t i = 0; i < len; i++) {
signed char v = s[i];
b = b * c1 + v;
c ^= b;
}
return fmix(Mur(b, Mur(len, c)));
}
static uint32_t Hash32Len5to12(const char *s, size_t len) {
uint32_t a = len, b = len * 5, c = 9, d = b;
a += Fetch32(s);
b += Fetch32(s + len - 4);
c += Fetch32(s + ((len >> 1) & 4));
return fmix(Mur(c, Mur(b, Mur(a, d))));
}
uint32_t CityHash32(const char *s, size_t len) {
if (len <= 24) {
return len <= 12
? (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len))
: Hash32Len13to24(s, len);
}
// len > 24
uint32_t h = len, g = c1 * len, f = g;
uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
h ^= a0;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
h ^= a2;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= a1;
g = Rotate32(g, 19);
g = g * 5 + 0xe6546b64;
g ^= a3;
g = Rotate32(g, 19);
g = g * 5 + 0xe6546b64;
f += a4;
f = Rotate32(f, 19);
f = f * 5 + 0xe6546b64;
size_t iters = (len - 1) / 20;
do {
uint32_t a0 = Rotate32(Fetch32(s) * c1, 17) * c2;
uint32_t a1 = Fetch32(s + 4);
uint32_t a2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
uint32_t a3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
uint32_t a4 = Fetch32(s + 16);
h ^= a0;
h = Rotate32(h, 18);
h = h * 5 + 0xe6546b64;
f += a1;
f = Rotate32(f, 19);
f = f * c1;
g += a2;
g = Rotate32(g, 18);
g = g * 5 + 0xe6546b64;
h ^= a3 + a1;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= a4;
g = absl::gbswap_32(g) * 5;
h += a4 * 5;
h = absl::gbswap_32(h);
f += a0;
PERMUTE3(f, h, g);
s += 20;
} while (--iters != 0);
g = Rotate32(g, 11) * c1;
g = Rotate32(g, 17) * c1;
f = Rotate32(f, 11) * c1;
f = Rotate32(f, 17) * c1;
h = Rotate32(h + g, 19);
h = h * 5 + 0xe6546b64;
h = Rotate32(h, 17) * c1;
h = Rotate32(h + f, 19);
h = h * 5 + 0xe6546b64;
h = Rotate32(h, 17) * c1;
return h;
}
// Bitwise right rotate. Normally this will compile to a single
// instruction, especially if the shift is a manifest constant.
static uint64_t Rotate(uint64_t val, int shift) {
// Avoid shifting by 64: doing so yields an undefined result.
return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}
static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); }
static uint64_t HashLen16(uint64_t u, uint64_t v) {
return Hash128to64(uint128(u, v));
}
static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
// Murmur-inspired hashing.
uint64_t a = (u ^ v) * mul;
a ^= (a >> 47);
uint64_t b = (v ^ a) * mul;
b ^= (b >> 47);
b *= mul;
return b;
}
static uint64_t HashLen0to16(const char *s, size_t len) {
if (len >= 8) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) + k2;
uint64_t b = Fetch64(s + len - 8);
uint64_t c = Rotate(b, 37) * mul + a;
uint64_t d = (Rotate(a, 25) + b) * mul;
return HashLen16(c, d, mul);
}
if (len >= 4) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch32(s);
return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
}
if (len > 0) {
uint8_t a = s[0];
uint8_t b = s[len >> 1];
uint8_t c = s[len - 1];
uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
uint32_t z = len + (static_cast<uint32_t>(c) << 2);
return ShiftMix(y * k2 ^ z * k0) * k2;
}
return k2;
}
// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
static uint64_t HashLen17to32(const char *s, size_t len) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) * k1;
uint64_t b = Fetch64(s + 8);
uint64_t c = Fetch64(s + len - 8) * mul;
uint64_t d = Fetch64(s + len - 16) * k2;
return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
a + Rotate(b + k2, 18) + c, mul);
}
// Return a 16-byte hash for 48 bytes. Quick and dirty.
// Callers do best to use "random-looking" values for a and b.
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(uint64_t w, uint64_t x,
uint64_t y, uint64_t z,
uint64_t a, uint64_t b) {
a += w;
b = Rotate(b + a + z, 21);
uint64_t c = a;
a += x;
a += y;
b += Rotate(a, 44);
return std::make_pair(a + z, b + c);
}
// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s, uint64_t a,
uint64_t b) {
return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
Fetch64(s + 24), a, b);
}
// Return an 8-byte hash for 33 to 64 bytes.
static uint64_t HashLen33to64(const char *s, size_t len) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) * k2;
uint64_t b = Fetch64(s + 8);
uint64_t c = Fetch64(s + len - 24);
uint64_t d = Fetch64(s + len - 32);
uint64_t e = Fetch64(s + 16) * k2;
uint64_t f = Fetch64(s + 24) * 9;
uint64_t g = Fetch64(s + len - 8);
uint64_t h = Fetch64(s + len - 16) * mul;
uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
uint64_t v = ((a + g) ^ d) + f + 1;
uint64_t w = absl::gbswap_64((u + v) * mul) + h;
uint64_t x = Rotate(e + f, 42) + c;
uint64_t y = (absl::gbswap_64((v + w) * mul) + g) * mul;
uint64_t z = e + f + c;
a = absl::gbswap_64((x + z) * mul + y) + b;
b = ShiftMix((z + a) * mul + d + h) * mul;
return b + x;
}
uint64_t CityHash64(const char *s, size_t len) {
if (len <= 32) {
if (len <= 16) {
return HashLen0to16(s, len);
} else {
return HashLen17to32(s, len);
}
} else if (len <= 64) {
return HashLen33to64(s, len);
}
// For strings over 64 bytes we hash the end first, and then as we
// loop we keep 56 bytes of state: v, w, x, y, and z.
uint64_t x = Fetch64(s + len - 40);
uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
std::pair<uint64_t, uint64_t> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
std::pair<uint64_t, uint64_t> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
x = x * k1 + Fetch64(s);
// Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
len = (len - 1) & ~static_cast<size_t>(63);
do {
x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
x ^= w.second;
y += v.first + Fetch64(s + 40);
z = Rotate(z + w.first, 33) * k1;
v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
std::swap(z, x);
s += 64;
len -= 64;
} while (len != 0);
return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
HashLen16(v.second, w.second) + x);
}
uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
return CityHash64WithSeeds(s, len, k2, seed);
}
uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
uint64_t seed1) {
return HashLen16(CityHash64(s, len) - seed0, seed1);
}
// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
// of any length representable in a signed long. Based on City and Murmur.
static uint128 CityMurmur(const char *s, size_t len, uint128 seed) {
uint64_t a = Uint128Low64(seed);
uint64_t b = Uint128High64(seed);
uint64_t c = 0;
uint64_t d = 0;
int64_t l = len - 16;
if (l <= 0) { // len <= 16
a = ShiftMix(a * k1) * k1;
c = b * k1 + HashLen0to16(s, len);
d = ShiftMix(a + (len >= 8 ? Fetch64(s) : c));
} else { // len > 16
c = HashLen16(Fetch64(s + len - 8) + k1, a);
d = HashLen16(b + len, c + Fetch64(s + len - 16));
a += d;
do {
a ^= ShiftMix(Fetch64(s) * k1) * k1;
a *= k1;
b ^= a;
c ^= ShiftMix(Fetch64(s + 8) * k1) * k1;
c *= k1;
d ^= c;
s += 16;
l -= 16;
} while (l > 0);
}
a = HashLen16(a, c);
b = HashLen16(d, b);
return uint128(a ^ b, HashLen16(b, a));
}
uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed) {
if (len < 128) {
return CityMurmur(s, len, seed);
}
// We expect len >= 128 to be the common case. Keep 56 bytes of state:
// v, w, x, y, and z.
std::pair<uint64_t, uint64_t> v, w;
uint64_t x = Uint128Low64(seed);
uint64_t y = Uint128High64(seed);
uint64_t z = len * k1;
v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s);
v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8);
w.first = Rotate(y + z, 35) * k1 + x;
w.second = Rotate(x + Fetch64(s + 88), 53) * k1;
// This is the same inner loop as CityHash64(), manually unrolled.
do {
x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
x ^= w.second;
y += v.first + Fetch64(s + 40);
z = Rotate(z + w.first, 33) * k1;
v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
std::swap(z, x);
s += 64;
x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
x ^= w.second;
y += v.first + Fetch64(s + 40);
z = Rotate(z + w.first, 33) * k1;
v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
std::swap(z, x);
s += 64;
len -= 128;
} while (ABSL_PREDICT_TRUE(len >= 128));
x += Rotate(v.first + z, 49) * k0;
y = y * k0 + Rotate(w.second, 37);
z = z * k0 + Rotate(w.first, 27);
w.first *= 9;
v.first *= k0;
// If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
for (size_t tail_done = 0; tail_done < len;) {
tail_done += 32;
y = Rotate(x + y, 42) * k0 + v.second;
w.first += Fetch64(s + len - tail_done + 16);
x = x * k0 + w.first;
z += w.second + Fetch64(s + len - tail_done);
w.second += v.first;
v = WeakHashLen32WithSeeds(s + len - tail_done, v.first + z, v.second);
v.first *= k0;
}
// At this point our 56 bytes of state should contain more than
// enough information for a strong 128-bit hash. We use two
// different 56-byte-to-8-byte hashes to get a 16-byte final result.
x = HashLen16(x, v.first);
y = HashLen16(y + z, w.first);
return uint128(HashLen16(x + v.second, w.second) + y,
HashLen16(x + w.second, y + v.second));
}
uint128 CityHash128(const char *s, size_t len) {
return len >= 16
? CityHash128WithSeed(s + 16, len - 16,
uint128(Fetch64(s), Fetch64(s + 8) + k0))
: CityHash128WithSeed(s, len, uint128(k0, k1));
}
} // namespace hash_internal
} // namespace absl
#ifdef __SSE4_2__
#include <nmmintrin.h>
#include "absl/hash/internal/city_crc.h"
namespace absl {
namespace hash_internal {
// Requires len >= 240.
static void CityHashCrc256Long(const char *s, size_t len, uint32_t seed,
uint64_t *result) {
uint64_t a = Fetch64(s + 56) + k0;
uint64_t b = Fetch64(s + 96) + k0;
uint64_t c = result[0] = HashLen16(b, len);
uint64_t d = result[1] = Fetch64(s + 120) * k0 + len;
uint64_t e = Fetch64(s + 184) + seed;
uint64_t f = 0;
uint64_t g = 0;
uint64_t h = c + d;
uint64_t x = seed;
uint64_t y = 0;
uint64_t z = 0;
// 240 bytes of input per iter.
size_t iters = len / 240;
len -= iters * 240;
do {
#undef CHUNK
#define CHUNK(r) \
PERMUTE3(x, z, y); \
b += Fetch64(s); \
c += Fetch64(s + 8); \
d += Fetch64(s + 16); \
e += Fetch64(s + 24); \
f += Fetch64(s + 32); \
a += b; \
h += f; \
b += c; \
f += d; \
g += e; \
e += z; \
g += x; \
z = _mm_crc32_u64(z, b + g); \
y = _mm_crc32_u64(y, e + h); \
x = _mm_crc32_u64(x, f + a); \
e = Rotate(e, r); \
c += e; \
s += 40
CHUNK(0);
PERMUTE3(a, h, c);
CHUNK(33);
PERMUTE3(a, h, f);
CHUNK(0);
PERMUTE3(b, h, f);
CHUNK(42);
PERMUTE3(b, h, d);
CHUNK(0);
PERMUTE3(b, h, e);
CHUNK(33);
PERMUTE3(a, h, e);
} while (--iters > 0);
while (len >= 40) {
CHUNK(29);
e ^= Rotate(a, 20);
h += Rotate(b, 30);
g ^= Rotate(c, 40);
f += Rotate(d, 34);
PERMUTE3(c, h, g);
len -= 40;
}
if (len > 0) {
s = s + len - 40;
CHUNK(33);
e ^= Rotate(a, 43);
h += Rotate(b, 42);
g ^= Rotate(c, 41);
f += Rotate(d, 40);
}
result[0] ^= h;
result[1] ^= g;
g += h;
a = HashLen16(a, g + z);
x += y << 32;
b += x;
c = HashLen16(c, z) + h;
d = HashLen16(d, e + result[0]);
g += e;
h += HashLen16(x, f);
e = HashLen16(a, d) + g;
z = HashLen16(b, c) + a;
y = HashLen16(g, h) + c;
result[0] = e + z + y + x;
a = ShiftMix((a + y) * k0) * k0 + b;
result[1] += a + result[0];
a = ShiftMix(a * k0) * k0 + c;
result[2] = a + result[1];
a = ShiftMix((a + e) * k0) * k0;
result[3] = a + result[2];
}
// Requires len < 240.
static void CityHashCrc256Short(const char *s, size_t len, uint64_t *result) {
char buf[240];
memcpy(buf, s, len);
memset(buf + len, 0, 240 - len);
CityHashCrc256Long(buf, 240, ~static_cast<uint32_t>(len), result);
}
void CityHashCrc256(const char *s, size_t len, uint64_t *result) {
if (ABSL_PREDICT_TRUE(len >= 240)) {
CityHashCrc256Long(s, len, 0, result);
} else {
CityHashCrc256Short(s, len, result);
}
}
uint128 CityHashCrc128WithSeed(const char *s, size_t len, uint128 seed) {
if (len <= 900) {
return CityHash128WithSeed(s, len, seed);
} else {
uint64_t result[4];
CityHashCrc256(s, len, result);
uint64_t u = Uint128High64(seed) + result[0];
uint64_t v = Uint128Low64(seed) + result[1];
return uint128(HashLen16(u, v + result[2]),
HashLen16(Rotate(v, 32), u * k0 + result[3]));
}
}
uint128 CityHashCrc128(const char *s, size_t len) {
if (len <= 900) {
return CityHash128(s, len);
} else {
uint64_t result[4];
CityHashCrc256(s, len, result);
return uint128(result[2], result[3]);
}
}
} // namespace hash_internal
} // namespace absl
#endif
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// http://code.google.com/p/cityhash/
//
// This file provides a few functions for hashing strings. All of them are
// high-quality functions in the sense that they pass standard tests such
// as Austin Appleby's SMHasher. They are also fast.
//
// For 64-bit x86 code, on short strings, we don't know of anything faster than
// CityHash64 that is of comparable quality. We believe our nearest competitor
// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash
// tables and most other hashing (excluding cryptography).
//
// For 64-bit x86 code, on long strings, the picture is more complicated.
// On many recent Intel CPUs, such as Nehalem, Westmere, Sandy Bridge, etc.,
// CityHashCrc128 appears to be faster than all competitors of comparable
// quality. CityHash128 is also good but not quite as fast. We believe our
// nearest competitor is Bob Jenkins' Spooky. We don't have great data for
// other 64-bit CPUs, but for long strings we know that Spooky is slightly
// faster than CityHash on some relatively recent AMD x86-64 CPUs, for example.
// Note that CityHashCrc128 is declared in citycrc.h.
//
// For 32-bit x86 code, we don't know of anything faster than CityHash32 that
// is of comparable quality. We believe our nearest competitor is Murmur3A.
// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.)
//
// Functions in the CityHash family are not suitable for cryptography.
//
// Please see CityHash's README file for more details on our performance
// measurements and so on.
//
// WARNING: This code has been only lightly tested on big-endian platforms!
// It is known to work well on little-endian platforms that have a small penalty
// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs.
// It should work on all 32-bit and 64-bit platforms that allow unaligned reads;
// bug reports are welcome.
//
// By the way, for some hash functions, given strings a and b, the hash
// of a+b is easily derived from the hashes of a and b. This property
// doesn't hold for any hash functions in this file.
#ifndef ABSL_HASH_INTERNAL_CITY_H_
#define ABSL_HASH_INTERNAL_CITY_H_
#include <stdint.h>
#include <stdlib.h> // for size_t.
#include <utility>
namespace absl {
namespace hash_internal {
typedef std::pair<uint64_t, uint64_t> uint128;
inline uint64_t Uint128Low64(const uint128 &x) { return x.first; }
inline uint64_t Uint128High64(const uint128 &x) { return x.second; }
// Hash function for a byte array.
uint64_t CityHash64(const char *s, size_t len);
// Hash function for a byte array. For convenience, a 64-bit seed is also
// hashed into the result.
uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed);
// Hash function for a byte array. For convenience, two seeds are also
// hashed into the result.
uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
uint64_t seed1);
// Hash function for a byte array.
uint128 CityHash128(const char *s, size_t len);
// Hash function for a byte array. For convenience, a 128-bit seed is also
// hashed into the result.
uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed);
// Hash function for a byte array. Most useful in 32-bit binaries.
uint32_t CityHash32(const char *s, size_t len);
// Hash 128 input bits down to 64 bits of output.
// This is intended to be a reasonably good hash function.
inline uint64_t Hash128to64(const uint128 &x) {
// Murmur-inspired hashing.
const uint64_t kMul = 0x9ddfea08eb382d69ULL;
uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
a ^= (a >> 47);
uint64_t b = (Uint128High64(x) ^ a) * kMul;
b ^= (b >> 47);
b *= kMul;
return b;
}
} // namespace hash_internal
} // namespace absl
#endif // ABSL_HASH_INTERNAL_CITY_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file declares the subset of the CityHash functions that require
// _mm_crc32_u64(). See the CityHash README for details.
//
// Functions in the CityHash family are not suitable for cryptography.
#ifndef ABSL_HASH_INTERNAL_CITY_CRC_H_
#define ABSL_HASH_INTERNAL_CITY_CRC_H_
#include "absl/hash/internal/city.h"
namespace absl {
namespace hash_internal {
// Hash function for a byte array.
uint128 CityHashCrc128(const char *s, size_t len);
// Hash function for a byte array. For convenience, a 128-bit seed is also
// hashed into the result.
uint128 CityHashCrc128WithSeed(const char *s, size_t len, uint128 seed);
// Hash function for a byte array. Sets result[0] ... result[3].
void CityHashCrc256(const char *s, size_t len, uint64_t *result);
} // namespace hash_internal
} // namespace absl
#endif // ABSL_HASH_INTERNAL_CITY_CRC_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/hash/internal/hash.h"
namespace absl {
namespace hash_internal {
ABSL_CONST_INIT const void* const CityHashState::kSeed = &kSeed;
} // namespace hash_internal
} // namespace absl
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: hash.h
// -----------------------------------------------------------------------------
//
#ifndef ABSL_HASH_INTERNAL_HASH_H_
#define ABSL_HASH_INTERNAL_HASH_H_
#include <algorithm>
#include <array>
#include <cmath>
#include <cstring>
#include <deque>
#include <forward_list>
#include <functional>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
#include "absl/hash/internal/city.h"
namespace absl {
namespace hash_internal {
// HashStateBase
//
// A hash state object represents an intermediate state in the computation
// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
// base class for hash state implementations. Developers adding type support
// for `absl::Hash` should not rely on any parts of the state object other than
// the following member functions:
//
// * HashStateBase::combine()
// * HashStateBase::combine_contiguous()
//
// A derived hash state class of type `H` must provide a static member function
// with a signature similar to the following:
//
// `static H combine_contiguous(H state, const unsigned char*, size_t)`.
//
// `HashStateBase` will provide a complete implementation for a hash state
// object in terms of this method.
//
// Example:
//
// // Use CRTP to define your derived class.
// struct MyHashState : HashStateBase<MyHashState> {
//     static MyHashState combine_contiguous(MyHashState hash_state,
//                                           const unsigned char* data,
//                                           size_t size);
// using MyHashState::HashStateBase::combine;
// using MyHashState::HashStateBase::combine_contiguous;
// };
template <typename H>
class HashStateBase {
public:
// HashStateBase::combine()
//
// Combines an arbitrary number of values into a hash state, returning the
// updated state.
//
// Each of the value types `T` must be separately hashable by the Abseil
// hashing framework.
//
// NOTE:
//
// state = H::combine(std::move(state), value1, value2, value3);
//
// is guaranteed to produce the same hash expansion as:
//
// state = H::combine(std::move(state), value1);
// state = H::combine(std::move(state), value2);
// state = H::combine(std::move(state), value3);
template <typename T, typename... Ts>
static H combine(H state, const T& value, const Ts&... values);
static H combine(H state) { return state; }
// HashStateBase::combine_contiguous()
//
// Combines a contiguous array of `size` elements into a hash state, returning
// the updated state.
//
// NOTE:
//
// state = H::combine_contiguous(std::move(state), data, size);
//
// is NOT guaranteed to produce the same hash expansion as a for-loop (it may
// perform internal optimizations). If you need this guarantee, use the
// for-loop instead.
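//
// A sketch of the element-by-element alternative with the guaranteed
// expansion:
//
//   for (size_t i = 0; i < size; ++i) {
//     state = H::combine(std::move(state), data[i]);
//   }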
template <typename T>
static H combine_contiguous(H state, const T* data, size_t size);
};
// is_uniquely_represented
//
// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
// is uniquely represented.
//
// A type is "uniquely represented" if two equal values of that type are
// guaranteed to have the same bytes in their underlying storage. In other
// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be
// zero. This property cannot be detected automatically, so this trait is false
// by default, but can be specialized by types that wish to assert that they are
// uniquely represented. This makes them eligible for certain optimizations.
//
// If you have any doubt whatsoever, do not specialize this template.
// The default is completely safe, and merely disables some optimizations
// that will not matter for most types. Specializing this template,
// on the other hand, can be very hazardous.
//
// To be uniquely represented, a type must not have multiple ways of
// representing the same value; for example, float and double are not
// uniquely represented, because they have distinct representations for
// +0 and -0. Furthermore, the type's byte representation must consist
// solely of user-controlled data, with no padding bits and no compiler-
// controlled data such as vptrs or sanitizer metadata. This is usually
// very difficult to guarantee, because in most cases the compiler can
// insert data and padding bits at its own discretion.
//
// If you specialize this template for a type `T`, you must do so in the file
// that defines that type (or in this file). If you define that specialization
// anywhere else, `is_uniquely_represented<T>` could have different meanings
// in different places.
//
// The Enable parameter is meaningless; it is provided as a convenience,
// to support certain SFINAE techniques when defining specializations.
template <typename T, typename Enable = void>
struct is_uniquely_represented : std::false_type {};
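// For illustration only (a hypothetical type; do not specialize unless the
// layout guarantees above actually hold for your type):
//
//   struct WireId {
//     uint32_t hi;
//     uint32_t lo;
//     friend bool operator==(WireId a, WireId b) {
//       return a.hi == b.hi && a.lo == b.lo;
//     }
//   };
//   namespace absl {
//   namespace hash_internal {
//   template <>
//   struct is_uniquely_represented<WireId> : std::true_type {};
//   }  // namespace hash_internal
//   }  // namespace absl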
// is_uniquely_represented<unsigned char>
//
// unsigned char is a synonym for "byte", so it is guaranteed to be
// uniquely represented.
template <>
struct is_uniquely_represented<unsigned char> : std::true_type {};
// is_uniquely_represented for non-standard integral types
//
// Integral types other than bool should be uniquely represented on any
// platform that this will plausibly be ported to.
template <typename Integral>
struct is_uniquely_represented<
Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
: std::true_type {};
// is_uniquely_represented<bool>
//
// bool is excluded: only one bit of a bool's object representation carries
// its value, so two equal bool objects need not be byte-for-byte identical.
template <>
struct is_uniquely_represented<bool> : std::false_type {};
// hash_bytes()
//
// Convenience function that combines `hash_state` with the byte representation
// of `value`.
template <typename H, typename T>
H hash_bytes(H hash_state, const T& value) {
const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
}
// -----------------------------------------------------------------------------
// AbslHashValue for Basic Types
// -----------------------------------------------------------------------------
// Note: Default `AbslHashValue` implementations live in `hash_internal`. This
// allows us to block lexical scope lookup when doing an unqualified call to
// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can
// only be found via ADL.
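// For example, a user-defined type typically opts in with a friend function
// found via ADL (a minimal sketch):
//
//   struct Song {
//     std::string artist;
//     std::string title;
//     template <typename H>
//     friend H AbslHashValue(H h, const Song& s) {
//       return H::combine(std::move(h), s.artist, s.title);
//     }
//   };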
// AbslHashValue() for hashing bool values
//
// We use SFINAE to ensure that this overload only accepts bool, not types that
// are convertible to bool.
template <typename H, typename B>
typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
H hash_state, B value) {
return H::combine(std::move(hash_state),
static_cast<unsigned char>(value ? 1 : 0));
}
// AbslHashValue() for hashing enum values
template <typename H, typename Enum>
typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
H hash_state, Enum e) {
// In practice, we could almost certainly just invoke hash_bytes directly,
// but it's possible that a sanitizer might one day want to
// store data in the unused bits of an enum. To avoid that risk, we
// convert to the underlying type before hashing. Hopefully this will get
// optimized away; if not, we can reopen discussion with c-toolchain-team.
return H::combine(std::move(hash_state),
static_cast<typename std::underlying_type<Enum>::type>(e));
}
// AbslHashValue() for hashing floating-point values
template <typename H, typename Float>
typename std::enable_if<std::is_floating_point<Float>::value, H>::type
AbslHashValue(H hash_state, Float value) {
return hash_internal::hash_bytes(std::move(hash_state),
value == 0 ? 0 : value);
}
// Long double has the property that it might have extra unused bytes in it.
// For example, on x86 sizeof(long double) == 16, but only 80 bits of it are
// actually used. This means we can't use hash_bytes on a long double and
// have to convert it to something else first.
template <typename H>
H AbslHashValue(H hash_state, long double value) {
const int category = std::fpclassify(value);
switch (category) {
case FP_INFINITE:
// Add the sign bit to differentiate between +Inf and -Inf
hash_state = H::combine(std::move(hash_state), std::signbit(value));
break;
case FP_NAN:
case FP_ZERO:
default:
// Category is enough for these.
break;
case FP_NORMAL:
case FP_SUBNORMAL:
// We can't convert `value` directly to double because this would have
// undefined behavior if the value is out of range.
// std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
// guaranteed to be in range for `double`. The truncation is
// implementation defined, but that works as long as it is deterministic.
int exp;
auto mantissa = static_cast<double>(std::frexp(value, &exp));
hash_state = H::combine(std::move(hash_state), mantissa, exp);
}
return H::combine(std::move(hash_state), category);
}
// AbslHashValue() for hashing pointers
template <typename H, typename T>
H AbslHashValue(H hash_state, T* ptr) {
return hash_internal::hash_bytes(std::move(hash_state), ptr);
}
// AbslHashValue() for hashing nullptr_t
template <typename H>
H AbslHashValue(H hash_state, std::nullptr_t) {
return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
}
// -----------------------------------------------------------------------------
// AbslHashValue for Composite Types
// -----------------------------------------------------------------------------
// is_hashable()
//
// Trait class which returns true if T is hashable by the absl::Hash framework.
// Used for the AbslHashValue implementations for composite types below.
template <typename T>
struct is_hashable;
// AbslHashValue() for hashing pairs
template <typename H, typename T1, typename T2>
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
H>::type
AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
return H::combine(std::move(hash_state), p.first, p.second);
}
// hash_tuple()
//
// Helper function for hashing a tuple. The third argument should
// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
template <typename H, typename Tuple, size_t... Is>
H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) {
return H::combine(std::move(hash_state), std::get<Is>(t)...);
}
// AbslHashValue for hashing tuples
template <typename H, typename... Ts>
#if defined(_MSC_VER)
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
// for now.
H
#else
typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
#endif
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
return hash_internal::hash_tuple(std::move(hash_state), t,
absl::make_index_sequence<sizeof...(Ts)>());
}
// -----------------------------------------------------------------------------
// AbslHashValue for Pointers
// -----------------------------------------------------------------------------
// AbslHashValue for hashing unique_ptr
template <typename H, typename T, typename D>
H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
return H::combine(std::move(hash_state), ptr.get());
}
// AbslHashValue for hashing shared_ptr
template <typename H, typename T>
H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
return H::combine(std::move(hash_state), ptr.get());
}
// -----------------------------------------------------------------------------
// AbslHashValue for String-Like Types
// -----------------------------------------------------------------------------
// AbslHashValue for hashing strings
//
// All the string-like types supported here provide the same hash expansion for
// the same character sequence. These types are:
//
// - `std::string` (and std::basic_string<char, std::char_traits<char>, A> for
// any allocator A)
// - `absl::string_view` and `std::string_view`
//
// For simplicity, we currently support only `char` strings. This support may
// be broadened, if necessary, but with some caution - this overload would
// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
// on the underlying character type.
template <typename H>
H AbslHashValue(H hash_state, absl::string_view str) {
return H::combine(
H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
str.size());
}
// -----------------------------------------------------------------------------
// AbslHashValue for Sequence Containers
// -----------------------------------------------------------------------------
// AbslHashValue for hashing std::array
template <typename H, typename T, size_t N>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
H hash_state, const std::array<T, N>& array) {
return H::combine_contiguous(std::move(hash_state), array.data(),
array.size());
}
// AbslHashValue for hashing std::deque
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
H hash_state, const std::deque<T, Allocator>& deque) {
// TODO(gromer): investigate a more efficient implementation taking
// advantage of the chunk structure.
for (const auto& t : deque) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), deque.size());
}
// AbslHashValue for hashing std::forward_list
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
H hash_state, const std::forward_list<T, Allocator>& list) {
size_t size = 0;
for (const T& t : list) {
hash_state = H::combine(std::move(hash_state), t);
++size;
}
return H::combine(std::move(hash_state), size);
}
// AbslHashValue for hashing std::list
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
H hash_state, const std::list<T, Allocator>& list) {
for (const auto& t : list) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), list.size());
}
// AbslHashValue for hashing std::vector
//
// Do not use this for vector<bool>. It does not have a .data(), and a fallback
// for std::hash<> is most likely faster.
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(),
vector.size()),
vector.size());
}
// -----------------------------------------------------------------------------
// AbslHashValue for Ordered Associative Containers
// -----------------------------------------------------------------------------
// AbslHashValue for hashing std::map
template <typename H, typename Key, typename T, typename Compare,
typename Allocator>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
H>::type
AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
for (const auto& t : map) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), map.size());
}
// AbslHashValue for hashing std::multimap
template <typename H, typename Key, typename T, typename Compare,
typename Allocator>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
H>::type
AbslHashValue(H hash_state,
const std::multimap<Key, T, Compare, Allocator>& map) {
for (const auto& t : map) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), map.size());
}
// AbslHashValue for hashing std::set
template <typename H, typename Key, typename Compare, typename Allocator>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
H hash_state, const std::set<Key, Compare, Allocator>& set) {
for (const auto& t : set) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), set.size());
}
// AbslHashValue for hashing std::multiset
template <typename H, typename Key, typename Compare, typename Allocator>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
H hash_state, const std::multiset<Key, Compare, Allocator>& set) {
for (const auto& t : set) {
hash_state = H::combine(std::move(hash_state), t);
}
return H::combine(std::move(hash_state), set.size());
}
// -----------------------------------------------------------------------------
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------
// AbslHashValue for hashing absl::optional
template <typename H, typename T>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
H hash_state, const absl::optional<T>& opt) {
if (opt) hash_state = H::combine(std::move(hash_state), *opt);
return H::combine(std::move(hash_state), opt.has_value());
}
// VariantVisitor
template <typename H>
struct VariantVisitor {
H&& hash_state;
template <typename T>
H operator()(const T& t) const {
return H::combine(std::move(hash_state), t);
}
};
// AbslHashValue for hashing absl::variant
template <typename H, typename... T>
typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
AbslHashValue(H hash_state, const absl::variant<T...>& v) {
if (!v.valueless_by_exception()) {
hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
}
return H::combine(std::move(hash_state), v.index());
}
// -----------------------------------------------------------------------------
// hash_range_or_bytes()
//
// Mixes all values in the range [data, data+size) into the hash state.
// This overload accepts only uniquely-represented types, and hashes them by
// hashing the entire range of bytes.
template <typename H, typename T>
typename std::enable_if<is_uniquely_represented<T>::value, H>::type
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
const auto* bytes = reinterpret_cast<const unsigned char*>(data);
return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
}
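// For example, a range of uint32_t (uniquely represented on common platforms)
// takes the overload above and is hashed in a single pass over
// sizeof(uint32_t) * size raw bytes rather than one combine() per element.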
// hash_range_or_bytes()
template <typename H, typename T>
typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
for (const auto end = data + size; data < end; ++data) {
hash_state = H::combine(std::move(hash_state), *data);
}
return hash_state;
}
// InvokeHashTag
//
// InvokeHash(H, const T&) invokes the appropriate hash implementation for a
// hasher of type `H` and a value of type `T`. If `T` is not hashable, there
// will be no matching overload of InvokeHash().
// Note: Some platforms (e.g. MSVC) do not support the detection idiom on
// std::hash. On those platforms the last fallback will be std::hash and
// InvokeHash() will always have a valid overload even if std::hash<T> is
// not valid.
//
// We try the following options in order:
// * If is_uniquely_represented, hash bytes directly.
// * ADL AbslHashValue(H, const T&) call.
// * std::hash<T>
// On MSVC we can't probe std::hash or stdext::hash because probing them
// triggers a static_assert instead of failing substitution.
#if defined(_MSC_VER)
#undef ABSL_HASH_INTERNAL_CAN_POISON_
#else // _MSC_VER
#define ABSL_HASH_INTERNAL_CAN_POISON_ 1
#endif // _MSC_VER
#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
ABSL_HASH_INTERNAL_CAN_POISON_
#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
#endif
enum class InvokeHashTag {
kUniquelyRepresented,
kHashValue,
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
kLegacyHash,
#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
kStdHash,
kNone
};
// HashSelect
//
// Type trait to select the appropriate hash implementation to use.
// HashSelect<T>::value is an instance of InvokeHashTag that indicates the best
// available hashing mechanism.
// See `Note` above about MSVC.
template <typename T>
struct HashSelect {
private:
struct State : HashStateBase<State> {
static State combine_contiguous(State hash_state, const unsigned char*,
size_t);
using State::HashStateBase::combine_contiguous;
};
// `Probe<V, Tag>::value` evaluates to `V<T>::value` if it is a valid
// expression, and `false` otherwise.
// `Probe<V, Tag>::tag` always evaluates to `Tag`.
template <template <typename> class V, InvokeHashTag Tag>
struct Probe {
private:
template <typename U, typename std::enable_if<V<U>::value, int>::type = 0>
static std::true_type Test(int);
template <typename U>
static std::false_type Test(char);
public:
static constexpr InvokeHashTag kTag = Tag;
static constexpr bool value = decltype(
Test<absl::remove_const_t<absl::remove_reference_t<T>>>(0))::value;
};
template <typename U>
using ProbeUniquelyRepresented = is_uniquely_represented<U>;
template <typename U>
using ProbeHashValue =
std::is_same<State, decltype(AbslHashValue(std::declval<State>(),
std::declval<const U&>()))>;
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
template <typename U>
using ProbeLegacyHash =
std::is_convertible<decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<
U>()(std::declval<const U&>())),
size_t>;
#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
template <typename U>
using ProbeStdHash =
#if ABSL_HASH_INTERNAL_CAN_POISON_
std::is_convertible<decltype(std::hash<U>()(std::declval<const U&>())),
size_t>;
#else // ABSL_HASH_INTERNAL_CAN_POISON_
std::true_type;
#endif // ABSL_HASH_INTERNAL_CAN_POISON_
template <typename U>
using ProbeNone = std::true_type;
public:
// Probe each implementation in order.
  // absl::disjunction short-circuits instantiation, so later probes are not
  // instantiated once an earlier one succeeds.
static constexpr InvokeHashTag value = absl::disjunction<
Probe<ProbeUniquelyRepresented, InvokeHashTag::kUniquelyRepresented>,
Probe<ProbeHashValue, InvokeHashTag::kHashValue>,
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
Probe<ProbeLegacyHash, InvokeHashTag::kLegacyHash>,
#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
Probe<ProbeStdHash, InvokeHashTag::kStdHash>,
Probe<ProbeNone, InvokeHashTag::kNone>>::kTag;
};
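// Illustrative results of the probe order above (a sketch; assumes the
// integral and string support defined elsewhere in this library):
//   HashSelect<int>::value         == InvokeHashTag::kUniquelyRepresented
//   HashSelect<std::string>::value == InvokeHashTag::kHashValue
// A type matching none of those probes but providing std::hash selects
// kStdHash; a type with no hash support at all selects kNone.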
template <typename T>
struct is_hashable : std::integral_constant<bool, HashSelect<T>::value !=
InvokeHashTag::kNone> {};
template <typename H, typename T>
absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kUniquelyRepresented,
H>
InvokeHash(H state, const T& value) {
return hash_internal::hash_bytes(std::move(state), value);
}
template <typename H, typename T>
absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kHashValue, H>
InvokeHash(H state, const T& value) {
return AbslHashValue(std::move(state), value);
}
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
template <typename H, typename T>
absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kLegacyHash, H>
InvokeHash(H state, const T& value) {
return hash_internal::hash_bytes(
std::move(state), ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
}
#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
template <typename H, typename T>
absl::enable_if_t<HashSelect<T>::value == InvokeHashTag::kStdHash, H>
InvokeHash(H state, const T& value) {
return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
}
// CityHashState
class CityHashState : public HashStateBase<CityHashState> {
// absl::uint128 is not an alias or a thin wrapper around the intrinsic.
// We use the intrinsic when available to improve performance.
#ifdef ABSL_HAVE_INTRINSIC_INT128
using uint128 = __uint128_t;
#else // ABSL_HAVE_INTRINSIC_INT128
using uint128 = absl::uint128;
#endif // ABSL_HAVE_INTRINSIC_INT128
static constexpr uint64_t kMul =
sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51} : uint64_t{0x9ddfea08eb382d69};
template <typename T>
using IntegralFastPath =
conjunction<std::is_integral<T>, is_uniquely_represented<T>>;
public:
// Move only
CityHashState(CityHashState&&) = default;
CityHashState& operator=(CityHashState&&) = default;
// CityHashState::combine_contiguous()
//
// Fundamental base case for hash recursion: mixes the given range of bytes
// into the hash state.
static CityHashState combine_contiguous(CityHashState hash_state,
const unsigned char* first,
size_t size) {
return CityHashState(
CombineContiguousImpl(hash_state.state_, first, size,
std::integral_constant<int, sizeof(size_t)>{}));
}
using CityHashState::HashStateBase::combine_contiguous;
// CityHashState::hash()
//
// For performance reasons in non-opt mode, we specialize this for
// integral types.
  // Otherwise we would be instantiating and calling dozens of functions for
  // something that is just one multiplication and a couple of XORs.
// The result should be the same as running the whole algorithm, but faster.
template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
static size_t hash(T value) {
return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
}
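  // For example, hash(int{42}) reduces to a single Mix(Seed(), 42) call with
  // no CityHashState object constructed; per the note above, the result
  // matches running the full algorithm on the same value.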
// Overload of CityHashState::hash()
template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
static size_t hash(const T& value) {
return static_cast<size_t>(combine(CityHashState{}, value).state_);
}
private:
// Invoked only once for a given argument; that plus the fact that this is
// move-only ensures that there is only one non-moved-from object.
CityHashState() : state_(Seed()) {}
// Workaround for MSVC bug.
  // We make the type copyable to fix the calling convention, even though we
  // never actually copy it. Keep it private so as not to affect the public
  // API of the type.
CityHashState(const CityHashState&) = default;
explicit CityHashState(uint64_t state) : state_(state) {}
// Implementation of the base case for combine_contiguous where we actually
// mix the bytes into the state.
// Dispatch to different implementations of the combine_contiguous depending
// on the value of `sizeof(size_t)`.
static uint64_t CombineContiguousImpl(uint64_t state,
const unsigned char* first, size_t len,
std::integral_constant<int, 4>
/* sizeof_size_t */);
static uint64_t CombineContiguousImpl(uint64_t state,
const unsigned char* first, size_t len,
std::integral_constant<int, 8>
/* sizeof_size_t*/);
// Reads 9 to 16 bytes from p.
  // The first 8 bytes are in .first; the remaining bytes, zero-padded, are
  // in .second.
static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
size_t len) {
uint64_t high = little_endian::Load64(p + len - 8);
return {little_endian::Load64(p), high >> (128 - len * 8)};
}
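  // Worked example: for len == 10, .first holds bytes p[0..7]; `high` is
  // Load64(p + 2), i.e. bytes p[2..9], and shifting it right by
  // 128 - 10 * 8 == 48 bits leaves bytes p[8..9], zero-padded, in .second.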
// Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
static uint64_t Read4To8(const unsigned char* p, size_t len) {
return (static_cast<uint64_t>(little_endian::Load32(p + len - 4))
<< (len - 4) * 8) |
little_endian::Load32(p);
}
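  // Worked example: for len == 5, Load32(p + 1) << 8 places bytes p[1..4] at
  // bit offsets 8..39 and Load32(p) places bytes p[0..3] at bit offsets
  // 0..31; the overlapping bytes agree, so the OR yields bytes p[0..4],
  // zero-padded to 64 bits.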
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
return static_cast<uint32_t>((p[0]) | //
(p[len / 2] << (len / 2 * 8)) | //
(p[len - 1] << ((len - 1) * 8)));
}
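  // Worked example: for len == 2 this computes
  // p[0] | (p[1] << 8) | (p[1] << 8); the middle and last terms coincide, so
  // every length from 1 to 3 yields the bytes in little-endian order without
  // reading out of bounds.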
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
using MultType =
absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
    // We do the addition in 64-bit space to make sure the 128-bit
    // multiplication is fast. If we were to do it in MultType, the compiler
    // would have to assume that the high word is non-zero and perform two
    // multiplications instead of one.
MultType m = state + v;
m *= kMul;
return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
}
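  // For example, on 64-bit platforms MultType is uint128, so Mix() performs a
  // single 64x64 -> 128-bit multiply, and the final xor-shift
  // (sizeof(m) * 8 / 2 == 64) folds the high half of the product back into
  // the low half.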
// Seed()
//
// A non-deterministic seed.
//
// The current purpose of this seed is to generate non-deterministic results
// and prevent having users depend on the particular hash values.
// It is not meant as a security feature right now, but it leaves the door
// open to upgrade it to a true per-process random seed. A true random seed
// costs more and we don't need to pay for that right now.
//
// On platforms with ASLR, we take advantage of it to make a per-process
// random value.
// See https://en.wikipedia.org/wiki/Address_space_layout_randomization
//
// On other platforms this is still going to be non-deterministic but most
// probably per-build and not per-process.
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
}
static const void* const kSeed;
uint64_t state_;
};
// CityHashState::CombineContiguousImpl()
inline uint64_t CityHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 4> /* sizeof_size_t */) {
// For large values we use CityHash, for small ones we just use a
// multiplicative hash.
uint64_t v;
if (len > 8) {
    v = absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first),
                                        len);
} else if (len >= 4) {
v = Read4To8(first, len);
} else if (len > 0) {
v = Read1To3(first, len);
} else {
// Empty ranges have no effect.
return state;
}
return Mix(state, v);
}
// Overload of CityHashState::CombineContiguousImpl()
inline uint64_t CityHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 8> /* sizeof_size_t */) {
// For large values we use CityHash, for small ones we just use a
// multiplicative hash.
uint64_t v;
if (len > 16) {
    v = absl::hash_internal::CityHash64(reinterpret_cast<const char*>(first),
                                        len);
} else if (len > 8) {
auto p = Read9To16(first, len);
state = Mix(state, p.first);
v = p.second;
} else if (len >= 4) {
v = Read4To8(first, len);
} else if (len > 0) {
v = Read1To3(first, len);
} else {
// Empty ranges have no effect.
return state;
}
return Mix(state, v);
}
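// Worked example for the 64-bit overload above: with len == 12, the Read9To16
// branch first mixes bytes 0..7 into the state and then mixes the zero-padded
// bytes 8..11, so a 12-byte input costs two Mix() calls and never reaches
// CityHash64.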
struct AggregateBarrier {};
// HashImpl
// Add a private base class to make sure this type is not an aggregate.
// Aggregates can be aggregate initialized even if the default constructor is
// deleted.
struct PoisonedHash : private AggregateBarrier {
PoisonedHash() = delete;
PoisonedHash(const PoisonedHash&) = delete;
PoisonedHash& operator=(const PoisonedHash&) = delete;
};
template <typename T>
struct HashImpl {
size_t operator()(const T& value) const { return CityHashState::hash(value); }
};
template <typename T>
struct Hash
: absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {};
template <typename H>
template <typename T, typename... Ts>
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
return H::combine(hash_internal::InvokeHash(std::move(state), value),
values...);
}
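// For example, H::combine(std::move(state), a, b) unrolls to
// H::combine(InvokeHash(std::move(state), a), b), and ultimately to
// InvokeHash(InvokeHash(std::move(state), a), b).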
// HashStateBase::combine_contiguous()
template <typename H>
template <typename T>
H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
return hash_internal::hash_range_or_bytes(std::move(state), data, size);
}
} // namespace hash_internal
} // namespace absl
#endif // ABSL_HASH_INTERNAL_HASH_H_
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstdio>
#include <cstdlib>
#include "absl/hash/hash.h"
// Prints the hash of argv[1].
int main(int argc, char** argv) {
if (argc < 2) return 1;
printf("%zu\n", absl::Hash<int>{}(std::atoi(argv[1]))); // NOLINT
}
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
#include <ostream>
#include <string>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
namespace absl {
namespace hash_internal {
// SpyHashState is an implementation of the HashState API that simply
// accumulates all input bytes in an internal buffer. This makes it useful
// for testing AbslHashValue overloads (so long as they are templated on the
// HashState parameter), since it can report the exact hash representation
// that the AbslHashValue overload produces.
//
// Sample usage:
// EXPECT_EQ(SpyHashState::combine(SpyHashState(), foo),
// SpyHashState::combine(SpyHashState(), bar));
template <typename T>
class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> {
public:
SpyHashStateImpl()
: error_(std::make_shared<absl::optional<std::string>>()) {
static_assert(std::is_void<T>::value, "");
}
// Move-only
SpyHashStateImpl(const SpyHashStateImpl&) = delete;
SpyHashStateImpl& operator=(const SpyHashStateImpl&) = delete;
SpyHashStateImpl(SpyHashStateImpl&& other) noexcept {
*this = std::move(other);
}
SpyHashStateImpl& operator=(SpyHashStateImpl&& other) noexcept {
hash_representation_ = std::move(other.hash_representation_);
error_ = other.error_;
moved_from_ = other.moved_from_;
other.moved_from_ = true;
return *this;
}
template <typename U>
SpyHashStateImpl(SpyHashStateImpl<U>&& other) { // NOLINT
hash_representation_ = std::move(other.hash_representation_);
error_ = other.error_;
moved_from_ = other.moved_from_;
other.moved_from_ = true;
}
template <typename A, typename... Args>
static SpyHashStateImpl combine(SpyHashStateImpl s, const A& a,
const Args&... args) {
// Pass an instance of SpyHashStateImpl<A> when trying to combine `A`. This
// allows us to test that the user only uses this instance for combine calls
// and does not call AbslHashValue directly.
// See AbslHashValue implementation at the bottom.
s = SpyHashStateImpl<A>::HashStateBase::combine(std::move(s), a);
return SpyHashStateImpl::combine(std::move(s), args...);
}
static SpyHashStateImpl combine(SpyHashStateImpl s) {
if (direct_absl_hash_value_error_) {
*s.error_ = "AbslHashValue should not be invoked directly.";
} else if (s.moved_from_) {
*s.error_ = "Used moved-from instance of the hash state object.";
}
return s;
}
static void SetDirectAbslHashValueError() {
direct_absl_hash_value_error_ = true;
}
// Two SpyHashStateImpl objects are equal if they hold equal hash
// representations.
friend bool operator==(const SpyHashStateImpl& lhs,
const SpyHashStateImpl& rhs) {
return lhs.hash_representation_ == rhs.hash_representation_;
}
friend bool operator!=(const SpyHashStateImpl& lhs,
const SpyHashStateImpl& rhs) {
return !(lhs == rhs);
}
enum class CompareResult {
kEqual,
kASuffixB,
kBSuffixA,
kUnequal,
};
static CompareResult Compare(const SpyHashStateImpl& a,
const SpyHashStateImpl& b) {
const std::string a_flat = absl::StrJoin(a.hash_representation_, "");
const std::string b_flat = absl::StrJoin(b.hash_representation_, "");
if (a_flat == b_flat) return CompareResult::kEqual;
if (absl::EndsWith(a_flat, b_flat)) return CompareResult::kBSuffixA;
if (absl::EndsWith(b_flat, a_flat)) return CompareResult::kASuffixB;
return CompareResult::kUnequal;
}
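  // For example, if a's flattened representation is "xyz" and b's is "yz",
  // Compare(a, b) returns kBSuffixA. Tests can use this to check that hashing
  // one value appends to, rather than replaces, the state produced by another.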
// operator<< prints the hash representation as a hex dump, to facilitate
// debugging.
friend std::ostream& operator<<(std::ostream& out,
const SpyHashStateImpl& hash_state) {
out << "[\n";
for (auto& s : hash_state.hash_representation_) {
size_t offset = 0;
for (char c : s) {
if (offset % 16 == 0) {
out << absl::StreamFormat("\n0x%04x: ", offset);
}
if (offset % 2 == 0) {
out << " ";
}
out << absl::StreamFormat("%02x", c);
++offset;
}
out << "\n";
}
return out << "]";
}
// The base case of the combine recursion, which writes raw bytes into the
// internal buffer.
static SpyHashStateImpl combine_contiguous(SpyHashStateImpl hash_state,
const unsigned char* begin,
size_t size) {
hash_state.hash_representation_.emplace_back(
reinterpret_cast<const char*>(begin), size);
return hash_state;
}
using SpyHashStateImpl::HashStateBase::combine_contiguous;
absl::optional<std::string> error() const {
if (moved_from_) {
return "Returned a moved-from instance of the hash state object.";
}
return *error_;
}
private:
template <typename U>
friend class SpyHashStateImpl;
// This is true if SpyHashStateImpl<T> has been passed to a call of
// AbslHashValue with the wrong type. This detects that the user called
// AbslHashValue directly (because the hash state type does not match).
static bool direct_absl_hash_value_error_;
std::vector<std::string> hash_representation_;
  // This is a shared_ptr because we want all instances from a particular
  // SpyHashState run to share the field. This way we can set the error for
  // a use-after-move and all the copies will see it.
std::shared_ptr<absl::optional<std::string>> error_;
bool moved_from_ = false;
};
template <typename T>
bool SpyHashStateImpl<T>::direct_absl_hash_value_error_;
template <bool& B>
struct OdrUse {
constexpr OdrUse() {}
bool& b = B;
};
template <void (*)()>
struct RunOnStartup {
static bool run;
static constexpr OdrUse<run> kOdrUse{};
};
template <void (*f)()>
bool RunOnStartup<f>::run = (f(), true);
template <
typename T, typename U,
    // Only trigger when T != U.
absl::enable_if_t<!std::is_same<T, U>::value, int> = 0,
    // This default argument works in two ways:
    // - First, it instantiates RunOnStartup and forces the initialization of
    //   `run`, which sets the global error flag.
    // - Second, because `run` is not a constant expression, it triggers a
    //   SFINAE error that disables this overload. If we didn't disable it,
    //   we would get ambiguous overload errors, which we don't want.
int = RunOnStartup<SpyHashStateImpl<T>::SetDirectAbslHashValueError>::run>
void AbslHashValue(SpyHashStateImpl<T>, const U&);
using SpyHashState = SpyHashStateImpl<void>;
} // namespace hash_internal
} // namespace absl
#endif // ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_