Commit 5839a148 by Abseil Team Committed by Copybara-Service

Add a feature to container_internal::Layout that lets you specify some array…

Add a feature to container_internal::Layout that lets you specify some array sizes at compile-time as template parameters. This can make offset and size calculations faster.

In particular, it seems to always improve the performance of AllocSize(), and it sometimes improves the performance of other functions, e.g. when the Layout object outlives the function that created it.

PiperOrigin-RevId: 616817169
Change-Id: Id1d318d7d2af68783f9f59090d89c642be6ae558
parent 56d3f227
...@@ -81,9 +81,30 @@ ...@@ -81,9 +81,30 @@
// } // }
// //
// The layout we used above combines fixed-size with dynamically-sized fields. // The layout we used above combines fixed-size with dynamically-sized fields.
// This is quite common. Layout is optimized for this use case and generates // This is quite common. Layout is optimized for this use case and attempts to
// optimal code. All computations that can be performed at compile time are // generate optimal code. To help the compiler do that in more cases, you can
// indeed performed at compile time. // specify the fixed sizes using `WithStaticSizes`. This ensures that all
// computations that can be performed at compile time are indeed performed at
// compile time. E.g.:
//
// using SL = L::WithStaticSizes<1, 1>;
//
// void Use(unsigned char* p) {
// // First, extract N and M.
// // Using `prefix` we can access the first three arrays but not more.
// //
// // More details: The first element always has offset 0. `SL`
// // has offsets for the second and third array based on sizes of
// // the first and second array, specified via `WithStaticSizes`.
// constexpr auto prefix = SL::Partial();
// size_t n = *prefix.Pointer<0>(p);
// size_t m = *prefix.Pointer<1>(p);
//
// // Now we can get a pointer to the final payload.
// const SL layout(n, m);
// double* a = layout.Pointer<double>(p);
// int* b = layout.Pointer<int>(p);
// }
// //
// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to // Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no // ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
...@@ -107,7 +128,7 @@ ...@@ -107,7 +128,7 @@
// CompactString(const char* s = "") { // CompactString(const char* s = "") {
// const size_t size = strlen(s); // const size_t size = strlen(s);
// // size_t[1] followed by char[size + 1]. // // size_t[1] followed by char[size + 1].
// const L layout(1, size + 1); // const L layout(size + 1);
// p_.reset(new unsigned char[layout.AllocSize()]); // p_.reset(new unsigned char[layout.AllocSize()]);
// // If running under ASAN, mark the padding bytes, if any, to catch // // If running under ASAN, mark the padding bytes, if any, to catch
// // memory errors. // // memory errors.
...@@ -125,14 +146,13 @@ ...@@ -125,14 +146,13 @@
// //
// const char* c_str() const { // const char* c_str() const {
// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)). // // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
// // The argument in Partial(1) specifies that we have size_t[1] in front // return L::Partial().Pointer<char>(p_.get());
// // of the characters.
// return L::Partial(1).Pointer<char>(p_.get());
// } // }
// //
// private: // private:
// // Our heap allocation contains a size_t followed by an array of chars. // // Our heap allocation contains a single size_t followed by an array of
// using L = Layout<size_t, char>; // // chars.
// using L = Layout<size_t, char>::WithStaticSizes<1>;
// std::unique_ptr<unsigned char[]> p_; // std::unique_ptr<unsigned char[]> p_;
// }; // };
// //
...@@ -146,11 +166,12 @@ ...@@ -146,11 +166,12 @@
// //
// The interface exported by this file consists of: // The interface exported by this file consists of:
// - class `Layout<>` and its public members. // - class `Layout<>` and its public members.
// - The public members of class `internal_layout::LayoutImpl<>`. That class // - The public members of classes `internal_layout::LayoutWithStaticSizes<>`
// isn't intended to be used directly, and its name and template parameter // and `internal_layout::LayoutImpl<>`. Those classes aren't intended to be
// list are internal implementation details, but the class itself provides // used directly, and their name and template parameter list are internal
// most of the functionality in this file. See comments on its members for // implementation details, but the classes themselves provide most of the
// detailed documentation. // functionality in this file. See comments on their members for detailed
// documentation.
// //
// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a // `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)` // `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
...@@ -164,7 +185,7 @@ ...@@ -164,7 +185,7 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include <ostream> #include <array>
#include <string> #include <string>
#include <tuple> #include <tuple>
#include <type_traits> #include <type_traits>
...@@ -210,9 +231,6 @@ struct NotAligned<const Aligned<T, N>> { ...@@ -210,9 +231,6 @@ struct NotAligned<const Aligned<T, N>> {
template <size_t> template <size_t>
using IntToSize = size_t; using IntToSize = size_t;
template <class>
using TypeToSize = size_t;
template <class T> template <class T>
struct Type : NotAligned<T> { struct Type : NotAligned<T> {
using type = T; using type = T;
...@@ -309,7 +327,8 @@ using IsLegalElementType = std::integral_constant< ...@@ -309,7 +327,8 @@ using IsLegalElementType = std::integral_constant<
!std::is_volatile<typename Type<T>::type>::value && !std::is_volatile<typename Type<T>::type>::value &&
adl_barrier::IsPow2(AlignOf<T>::value)>; adl_barrier::IsPow2(AlignOf<T>::value)>;
template <class Elements, class SizeSeq, class OffsetSeq> template <class Elements, class StaticSizeSeq, class RuntimeSizeSeq,
class SizeSeq, class OffsetSeq>
class LayoutImpl; class LayoutImpl;
// Public base class of `Layout` and the result type of `Layout::Partial()`. // Public base class of `Layout` and the result type of `Layout::Partial()`.
...@@ -317,31 +336,49 @@ class LayoutImpl; ...@@ -317,31 +336,49 @@ class LayoutImpl;
// `Elements...` contains all template arguments of `Layout` that created this // `Elements...` contains all template arguments of `Layout` that created this
// instance. // instance.
// //
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments // `StaticSizeSeq...` is an index_sequence containing the sizes specified at
// passed to `Layout::Partial()` or `Layout::Layout()`. // compile-time.
//
// `RuntimeSizeSeq...` is `[0, NumRuntimeSizes)`, where `NumRuntimeSizes` is the
// number of arguments passed to `Layout::Partial()` or `Layout::Layout()`.
//
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is `NumRuntimeSizes` plus
// the number of sizes in `StaticSizeSeq`.
// //
// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is // `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we // `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
// can compute offsets). // can compute offsets).
template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq> template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>> { absl::index_sequence<OffsetSeq...>> {
private: private:
static_assert(sizeof...(Elements) > 0, "At least one field is required"); static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value, static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
"Invalid element type (see IsLegalElementType)"); "Invalid element type (see IsLegalElementType)");
static_assert(sizeof...(StaticSizeSeq) <= sizeof...(Elements),
"Too many static sizes specified");
enum { enum {
NumTypes = sizeof...(Elements), NumTypes = sizeof...(Elements),
NumStaticSizes = sizeof...(StaticSizeSeq),
NumRuntimeSizes = sizeof...(RuntimeSizeSeq),
NumSizes = sizeof...(SizeSeq), NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq), NumOffsets = sizeof...(OffsetSeq),
}; };
// These are guaranteed by `Layout`. // These are guaranteed by `Layout`.
static_assert(NumStaticSizes + NumRuntimeSizes == NumSizes, "Internal error");
static_assert(NumSizes <= NumTypes, "Internal error");
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error"); "Internal error");
static_assert(NumTypes > 0, "Internal error"); static_assert(NumTypes > 0, "Internal error");
static constexpr std::array<size_t, sizeof...(StaticSizeSeq)> kStaticSizes = {
StaticSizeSeq...};
// Returns the index of `T` in `Elements...`. Results in a compilation error // Returns the index of `T` in `Elements...`. Results in a compilation error
// if `Elements...` doesn't contain exactly one instance of `T`. // if `Elements...` doesn't contain exactly one instance of `T`.
template <class T> template <class T>
...@@ -364,7 +401,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -364,7 +401,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
template <size_t N> template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type; using ElementType = typename std::tuple_element<N, ElementTypes>::type;
constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes) constexpr explicit LayoutImpl(IntToSize<RuntimeSizeSeq>... sizes)
: size_{sizes...} {} : size_{sizes...} {}
// Alignment of the layout, equal to the strictest alignment of all elements. // Alignment of the layout, equal to the strictest alignment of all elements.
...@@ -390,7 +427,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -390,7 +427,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const { constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds"); static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align( return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1], Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
ElementAlignment<N>::value); ElementAlignment<N>::value);
} }
...@@ -412,8 +449,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -412,8 +449,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
return {{Offset<OffsetSeq>()...}}; return {{Offset<OffsetSeq>()...}};
} }
// The number of elements in the Nth array. This is the Nth argument of // The number of elements in the Nth array (zero-based).
// `Layout::Partial()` or `Layout::Layout()` (zero-based).
// //
// // int[3], 4 bytes of padding, double[4]. // // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4); // Layout<int, double> x(3, 4);
...@@ -421,10 +457,15 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -421,10 +457,15 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// assert(x.Size<1>() == 4); // assert(x.Size<1>() == 4);
// //
// Requires: `N < NumSizes`. // Requires: `N < NumSizes`.
template <size_t N> template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
constexpr size_t Size() const {
return kStaticSizes[N];
}
template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
constexpr size_t Size() const { constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds"); static_assert(N < NumSizes, "Index out of bounds");
return size_[N]; return size_[N - NumStaticSizes];
} }
// The number of elements in the array with the specified element type. // The number of elements in the array with the specified element type.
...@@ -579,7 +620,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -579,7 +620,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const { constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() + return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1]; SizeOf<ElementType<NumTypes - 1>>::value * Size<NumTypes - 1>();
} }
// If built with --config=asan, poisons padding bytes (if any) in the // If built with --config=asan, poisons padding bytes (if any) in the
...@@ -603,7 +644,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -603,7 +644,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour. // The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) { if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start = size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1]; Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>();
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start); ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
} }
#endif #endif
...@@ -632,47 +673,66 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>, ...@@ -632,47 +673,66 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
adl_barrier::TypeName<ElementType<OffsetSeq>>()...}; adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) { for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], absl::StrAppend(&res, "[", DebugSize(i), "]; @", offsets[i + 1],
"(", sizes[i + 1], ")"); types[i + 1], "(", sizes[i + 1], ")");
} }
// NumSizes is a constant that may be zero. Some compilers cannot see that // NumSizes is a constant that may be zero. Some compilers cannot see that
// inside the if statement "size_[NumSizes - 1]" must be valid. // inside the if statement "size_[NumSizes - 1]" must be valid.
int last = static_cast<int>(NumSizes) - 1; int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) { if (NumTypes == NumSizes && last >= 0) {
absl::StrAppend(&res, "[", size_[last], "]"); absl::StrAppend(&res, "[", DebugSize(static_cast<size_t>(last)), "]");
} }
return res; return res;
} }
private: private:
size_t DebugSize(size_t n) const {
if (n < NumStaticSizes) {
return kStaticSizes[n];
} else {
return size_[n - NumStaticSizes];
}
}
// Arguments of `Layout::Partial()` or `Layout::Layout()`. // Arguments of `Layout::Partial()` or `Layout::Layout()`.
size_t size_[NumSizes > 0 ? NumSizes : 1]; size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
}; };
template <size_t NumSizes, class... Ts> // Defining a constexpr static class member variable is redundant and deprecated
using LayoutType = LayoutImpl< // in C++17, but required in C++14.
std::tuple<Ts...>, absl::make_index_sequence<NumSizes>, template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>; size_t... SizeSeq, size_t... OffsetSeq>
constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>>::kStaticSizes;
} // namespace internal_layout template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, StaticSizeSeq,
absl::make_index_sequence<NumRuntimeSizes>,
absl::make_index_sequence<NumRuntimeSizes + StaticSizeSeq::size()>,
absl::make_index_sequence<adl_barrier::Min(
sizeof...(Ts), NumRuntimeSizes + StaticSizeSeq::size() + 1)>>;
template <class StaticSizeSeq, class... Ts>
class LayoutWithStaticSizes
: public LayoutType<StaticSizeSeq,
sizeof...(Ts) - adl_barrier::Min(sizeof...(Ts),
StaticSizeSeq::size()),
Ts...> {
private:
using Super =
LayoutType<StaticSizeSeq,
sizeof...(Ts) -
adl_barrier::Min(sizeof...(Ts), StaticSizeSeq::size()),
Ts...>;
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutImpl above. The type is
// internal to the library but its methods are public, and they are inherited
// by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
public: public:
static_assert(sizeof...(Ts) > 0, "At least one field is required");
static_assert(
absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
"Invalid element type (see IsLegalElementType)");
// The result type of `Partial()` with `NumSizes` arguments. // The result type of `Partial()` with `NumSizes` arguments.
template <size_t NumSizes> template <size_t NumSizes>
using PartialType = internal_layout::LayoutType<NumSizes, Ts...>; using PartialType =
internal_layout::LayoutType<StaticSizeSeq, NumSizes, Ts...>;
// `Layout` knows the element types of the arrays we want to lay out in // `Layout` knows the element types of the arrays we want to lay out in
// memory but not the number of elements in each array. // memory but not the number of elements in each array.
...@@ -698,14 +758,18 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> { ...@@ -698,14 +758,18 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
// Note: The sizes of the arrays must be specified in number of elements, // Note: The sizes of the arrays must be specified in number of elements,
// not in bytes. // not in bytes.
// //
// Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. // Requires: `sizeof...(Sizes) + NumStaticSizes <= sizeof...(Ts)`.
// Requires: all arguments are convertible to `size_t`. // Requires: all arguments are convertible to `size_t`.
template <class... Sizes> template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) { static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); static_assert(sizeof...(Sizes) + StaticSizeSeq::size() <= sizeof...(Ts),
return PartialType<sizeof...(Sizes)>(std::forward<Sizes>(sizes)...); "");
return PartialType<sizeof...(Sizes)>(
static_cast<size_t>(std::forward<Sizes>(sizes))...);
} }
// Inherit LayoutType's constructor.
//
// Creates a layout with the sizes of all arrays specified. If you know // Creates a layout with the sizes of all arrays specified. If you know
// only the sizes of the first N arrays (where N can be zero), you can use // only the sizes of the first N arrays (where N can be zero), you can use
// `Partial()` defined above. The constructor is essentially equivalent to // `Partial()` defined above. The constructor is essentially equivalent to
...@@ -714,8 +778,69 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> { ...@@ -714,8 +778,69 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
// //
// Note: The sizes of the arrays must be specified in number of elements, // Note: The sizes of the arrays must be specified in number of elements,
// not in bytes. // not in bytes.
constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes) //
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {} // Implementation note: we do this via a `using` declaration instead of
// defining our own explicit constructor because the signature of LayoutType's
// constructor depends on RuntimeSizeSeq, which we don't have access to here.
// If we defined our own constructor here, it would have to use a parameter
// pack and then cast the arguments to size_t when calling the superclass
// constructor, similar to what Partial() does. But that would suffer from the
// same problem that Partial() has, which is that the parameter types are
// inferred from the arguments, which may be signed types, which must then be
// cast to size_t. This can lead to negative values being silently (i.e. with
// no compiler warnings) cast to an unsigned type. Having a constructor with
// size_t parameters helps the compiler generate better warnings about
// potential bad casts, while avoiding false warnings when positive literal
// arguments are used. If an argument is a positive literal integer (e.g.
// `1`), the compiler will understand that it can be safely converted to
// size_t, and hence not generate a warning. But if a negative literal (e.g.
// `-1`) or a variable with signed type is used, then it can generate a
// warning about a potentially unsafe implicit cast. It would be great if we
// could do this for Partial() too, but unfortunately as of C++23 there seems
// to be no way to define a function with a variable number of parameters of a
// certain type, a.k.a. homogeneous function parameter packs. So we're forced
// to choose between explicitly casting the arguments to size_t, which
// suppresses all warnings, even potentially valid ones, or implicitly casting
// them to size_t, which generates bogus warnings whenever literal arguments
// are used, even if they're positive.
using Super::Super;
};
} // namespace internal_layout
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutWithStaticSizes and
// internal_layout::LayoutImpl above. Those types are internal to the library
// but their methods are public, and they are inherited by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutWithStaticSizes<
absl::make_index_sequence<0>, Ts...> {
private:
using Super =
internal_layout::LayoutWithStaticSizes<absl::make_index_sequence<0>,
Ts...>;
public:
// If you know the sizes of some or all of the arrays at compile time, you can
// use `WithStaticSizes` or `WithStaticSizeSequence` to create a `Layout` type
// with those sizes baked in. This can help the compiler generate optimal code
// for calculating array offsets and AllocSize().
//
// Like `Partial()`, the N sizes you specify are for the first N arrays, and
// they specify the number of elements in each array, not the number of bytes.
template <class StaticSizeSeq>
using WithStaticSizeSequence =
internal_layout::LayoutWithStaticSizes<StaticSizeSeq, Ts...>;
template <size_t... StaticSizes>
using WithStaticSizes =
WithStaticSizeSequence<std::index_sequence<StaticSizes...>>;
// Inherit LayoutWithStaticSizes's constructor, which requires you to specify
// all the array sizes.
using Super::Super;
}; };
} // namespace container_internal } // namespace container_internal
......
...@@ -15,6 +15,9 @@ ...@@ -15,6 +15,9 @@
// Every benchmark should have the same performance as the corresponding // Every benchmark should have the same performance as the corresponding
// headroom benchmark. // headroom benchmark.
#include <cstddef>
#include <cstdint>
#include "absl/base/internal/raw_logging.h" #include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/layout.h" #include "absl/container/internal/layout.h"
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
...@@ -28,6 +31,8 @@ using ::benchmark::DoNotOptimize; ...@@ -28,6 +31,8 @@ using ::benchmark::DoNotOptimize;
using Int128 = int64_t[2]; using Int128 = int64_t[2];
// Rounds `n` up to the nearest multiple of `m`, where `m` is a power of two.
// Hand-written counterpart of Layout's internal alignment, used by the
// headroom benchmarks so they do not depend on Layout itself.
constexpr size_t MyAlign(size_t n, size_t m) {
  const size_t mask = m - 1;
  return (n + mask) & ~mask;
}
// This benchmark provides the upper bound on performance for BM_OffsetConstant. // This benchmark provides the upper bound on performance for BM_OffsetConstant.
template <size_t Offset, class... Ts> template <size_t Offset, class... Ts>
void BM_OffsetConstantHeadroom(benchmark::State& state) { void BM_OffsetConstantHeadroom(benchmark::State& state) {
...@@ -37,6 +42,15 @@ void BM_OffsetConstantHeadroom(benchmark::State& state) { ...@@ -37,6 +42,15 @@ void BM_OffsetConstantHeadroom(benchmark::State& state) {
} }
template <size_t Offset, class... Ts> template <size_t Offset, class... Ts>
void BM_OffsetConstantStatic(benchmark::State& state) {
  // Same measurement as BM_OffsetConstant, but all three leading array sizes
  // are baked into the type via WithStaticSizes, so Offset<3>() can be folded
  // to a compile-time constant.
  using StaticL = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
  ABSL_RAW_CHECK(StaticL::Partial().template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(StaticL::Partial().template Offset<3>());
  }
}
template <size_t Offset, class... Ts>
void BM_OffsetConstant(benchmark::State& state) { void BM_OffsetConstant(benchmark::State& state) {
using L = Layout<Ts...>; using L = Layout<Ts...>;
ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset, ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
...@@ -46,14 +60,75 @@ void BM_OffsetConstant(benchmark::State& state) { ...@@ -46,14 +60,75 @@ void BM_OffsetConstant(benchmark::State& state) {
} }
} }
// Measures Offset<3>() when the Partial() result is built once and kept in a
// variable that is opaque to the optimizer, i.e. when the layout object
// outlives the expression that created it.
template <size_t Offset, class... Ts>
void BM_OffsetConstantIndirect(benchmark::State& state) {
  using L = Layout<Ts...>;
  auto partial = L::Partial(3, 5, 7);
  ABSL_RAW_CHECK(partial.template Offset<3>() == Offset, "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(partial);
    DoNotOptimize(partial.template Offset<3>());
  }
}
// Reference implementation of the offset of the fourth array when the first
// two sizes (3 and 5) are compile-time constants and the third (`k`) is not.
// Headroom baseline for the BM_OffsetPartial* benchmarks.
template <class... Ts>
size_t PartialOffset(size_t k);

template <>
size_t PartialOffset<int8_t, int16_t, int32_t, Int128>(size_t k) {
  // Offset of the int32_t array: int8_t[3] padded to alignof(int16_t), then
  // int16_t[5] padded to alignof(int32_t). Fully constant-foldable.
  constexpr size_t o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4);
  // Pad the end of int32_t[k] to the 8-byte alignment of Int128.
  return MyAlign(o + k * 4, 8);
}

template <>
size_t PartialOffset<Int128, int32_t, int16_t, int8_t>(size_t k) {
  // Alignments are non-increasing in this order, so no padding is needed.
  return 3 * 16 + 5 * 4 + k * 2;
}
// This benchmark provides the upper bound on performance for BM_OffsetPartial
// and BM_OffsetPartialStatic: it computes the same offset with hand-written
// arithmetic, bypassing Layout entirely.
template <size_t Offset, class... Ts>
void BM_OffsetPartialHeadroom(benchmark::State& state) {
  size_t k = 7;  // runtime size of the third array
  ABSL_RAW_CHECK(PartialOffset<Ts...>(k) == Offset, "Invalid offset");
  for (auto _ : state) {
    // Keep `k` opaque so the offset computation is not constant-folded away.
    DoNotOptimize(k);
    DoNotOptimize(PartialOffset<Ts...>(k));
  }
}
// Measures Offset<3>() when the first two array sizes are compile-time
// constants (via WithStaticSizes) and only the third is a runtime value.
template <size_t Offset, class... Ts>
void BM_OffsetPartialStatic(benchmark::State& state) {
  using SL = typename Layout<Ts...>::template WithStaticSizes<3, 5>;
  size_t third_size = 7;
  ABSL_RAW_CHECK(SL::Partial(third_size).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(third_size);
    DoNotOptimize(SL::Partial(third_size).template Offset<3>());
  }
}
// Measures Offset<3>() when all three leading sizes are passed to Partial() at
// run time (two of them happen to be literals, but the type is not static).
template <size_t Offset, class... Ts>
void BM_OffsetPartial(benchmark::State& state) {
  using L = Layout<Ts...>;
  size_t third_size = 7;
  ABSL_RAW_CHECK(L::Partial(3, 5, third_size).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(third_size);
    DoNotOptimize(L::Partial(3, 5, third_size).template Offset<3>());
  }
}
template <class... Ts> template <class... Ts>
size_t VariableOffset(size_t n, size_t m, size_t k); size_t VariableOffset(size_t n, size_t m, size_t k);
template <> template <>
size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m, size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
size_t k) { size_t k) {
auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }; return MyAlign(MyAlign(MyAlign(n * 1, 2) + m * 2, 4) + k * 4, 8);
return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
} }
template <> template <>
...@@ -94,6 +169,75 @@ void BM_OffsetVariable(benchmark::State& state) { ...@@ -94,6 +169,75 @@ void BM_OffsetVariable(benchmark::State& state) {
} }
} }
// Reference implementation of AllocSize() for sizes (3, 5, 7, x): the offset
// of the last array is a compile-time constant, so only one multiply-and-add
// depends on `x`. Headroom baseline for the BM_AllocSize* benchmarks.
template <class... Ts>
size_t AllocSize(size_t x);

template <>
size_t AllocSize<int8_t, int16_t, int32_t, Int128>(size_t x) {
  constexpr size_t kPayloadOffset =
      Layout<int8_t, int16_t, int32_t, Int128>::Partial(3, 5, 7)
          .template Offset<Int128>();
  return kPayloadOffset + sizeof(Int128) * x;
}

template <>
size_t AllocSize<Int128, int32_t, int16_t, int8_t>(size_t x) {
  constexpr size_t kPayloadOffset =
      Layout<Int128, int32_t, int16_t, int8_t>::Partial(3, 5, 7)
          .template Offset<int8_t>();
  return kPayloadOffset + sizeof(int8_t) * x;
}
// This benchmark provides the upper bound on performance for BM_AllocSize,
// using the hand-written AllocSize() reference above.
template <size_t Size, class... Ts>
void BM_AllocSizeHeadroom(benchmark::State& state) {
  size_t num_last = 9;
  ABSL_RAW_CHECK(AllocSize<Ts...>(num_last) == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(num_last);
    DoNotOptimize(AllocSize<Ts...>(num_last));
  }
}
// Measures AllocSize() when the first three array sizes are compile-time
// constants (via WithStaticSizes) and only the last is a runtime value.
template <size_t Size, class... Ts>
void BM_AllocSizeStatic(benchmark::State& state) {
  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
  size_t x = 9;
  // The check is on the allocation size, so report "Invalid size" (matching
  // BM_AllocSizeHeadroom), not "Invalid offset".
  ABSL_RAW_CHECK(L(x).AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(x);
    DoNotOptimize(L(x).AllocSize());
  }
}
// Measures AllocSize() when all four array sizes are runtime values.
template <size_t Size, class... Ts>
void BM_AllocSize(benchmark::State& state) {
  using L = Layout<Ts...>;
  size_t n = 3;
  size_t m = 5;
  size_t k = 7;
  size_t x = 9;
  // The check is on the allocation size, so report "Invalid size" (matching
  // BM_AllocSizeHeadroom), not "Invalid offset".
  ABSL_RAW_CHECK(L(n, m, k, x).AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(n);
    DoNotOptimize(m);
    DoNotOptimize(k);
    DoNotOptimize(x);
    DoNotOptimize(L(n, m, k, x).AllocSize());
  }
}
// Measures AllocSize() on a pre-built Layout object that is kept opaque to
// the optimizer between iterations (the layout outlives its construction).
template <size_t Size, class... Ts>
void BM_AllocSizeIndirect(benchmark::State& state) {
  using L = Layout<Ts...>;
  auto l = L(3, 5, 7, 9);
  // The check is on the allocation size, so report "Invalid size" (matching
  // BM_AllocSizeHeadroom), not "Invalid offset".
  ABSL_RAW_CHECK(l.AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(l);
    DoNotOptimize(l.AllocSize());
  }
}
// Run all benchmarks in two modes: // Run all benchmarks in two modes:
// //
// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?]. // Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
...@@ -106,16 +250,46 @@ void BM_OffsetVariable(benchmark::State& state) { ...@@ -106,16 +250,46 @@ void BM_OffsetVariable(benchmark::State& state) {
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t, OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
Int128); Int128);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t, OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
int8_t); int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t); OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetPartial, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetPartial, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t, OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
Int128); Int128);
OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t, OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
int8_t); int8_t);
OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t); OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSize, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSize, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 91, Int128, int32_t, int16_t, int8_t);
} // namespace } // namespace
} // namespace container_internal } // namespace container_internal
ABSL_NAMESPACE_END ABSL_NAMESPACE_END
......
...@@ -68,9 +68,7 @@ struct alignas(8) Int128 { ...@@ -68,9 +68,7 @@ struct alignas(8) Int128 {
// int64_t is *not* 8-byte aligned on all platforms! // int64_t is *not* 8-byte aligned on all platforms!
struct alignas(8) Int64 { struct alignas(8) Int64 {
int64_t a; int64_t a;
friend bool operator==(Int64 lhs, Int64 rhs) { friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; }
return lhs.a == rhs.a;
}
}; };
// Properties of types that this test relies on. // Properties of types that this test relies on.
...@@ -271,6 +269,35 @@ TEST(Layout, Offsets) { ...@@ -271,6 +269,35 @@ TEST(Layout, Offsets) {
} }
} }
// Offsets() must be identical regardless of how the array sizes (5, 3, 1)
// are split between compile-time template parameters (WithStaticSizes<...>)
// and run-time arguments (Partial(...) / the constructor). Each scope below
// moves one more size to compile time; the offsets stay (0, 8, 24).
TEST(Layout, StaticOffsets) {
  using L = Layout<int8_t, int32_t, Int128>;
  {
    // No static sizes: behaves exactly like the plain Layout.
    using SL = L::WithStaticSizes<>;
    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0));
    EXPECT_THAT(SL::Partial(5).Offsets(), ElementsAre(0, 8));
    EXPECT_THAT(SL::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
  }
  {
    // First size static: Partial() already knows the second offset.
    using SL = L::WithStaticSizes<5>;
    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8));
    EXPECT_THAT(SL::Partial(3).Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL::Partial(3, 1).Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL(3, 1).Offsets(), ElementsAre(0, 8, 24));
  }
  {
    // First two sizes static: all offsets are known without any arguments.
    using SL = L::WithStaticSizes<5, 3>;
    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL::Partial(1).Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL(1).Offsets(), ElementsAre(0, 8, 24));
  }
  {
    // All sizes static: no run-time arguments at all.
    using SL = L::WithStaticSizes<5, 3, 1>;
    EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
    EXPECT_THAT(SL().Offsets(), ElementsAre(0, 8, 24));
  }
}
TEST(Layout, AllocSize) { TEST(Layout, AllocSize) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
...@@ -295,6 +322,30 @@ TEST(Layout, AllocSize) { ...@@ -295,6 +322,30 @@ TEST(Layout, AllocSize) {
} }
} }
// AllocSize() must be the same (136 bytes for sizes 3, 5, 7 here) no matter
// how the sizes are split between compile-time template parameters and
// run-time arguments.
TEST(Layout, StaticAllocSize) {
  using L = Layout<int8_t, int32_t, Int128>;
  {
    // All sizes dynamic.
    using SL = L::WithStaticSizes<>;
    EXPECT_EQ(136, SL::Partial(3, 5, 7).AllocSize());
    EXPECT_EQ(136, SL(3, 5, 7).AllocSize());
  }
  {
    // First size static.
    using SL = L::WithStaticSizes<3>;
    EXPECT_EQ(136, SL::Partial(5, 7).AllocSize());
    EXPECT_EQ(136, SL(5, 7).AllocSize());
  }
  {
    // First two sizes static.
    using SL = L::WithStaticSizes<3, 5>;
    EXPECT_EQ(136, SL::Partial(7).AllocSize());
    EXPECT_EQ(136, SL(7).AllocSize());
  }
  {
    // All sizes static: AllocSize() needs no run-time input at all.
    using SL = L::WithStaticSizes<3, 5, 7>;
    EXPECT_EQ(136, SL::Partial().AllocSize());
    EXPECT_EQ(136, SL().AllocSize());
  }
}
TEST(Layout, SizeByIndex) { TEST(Layout, SizeByIndex) {
{ {
using L = Layout<int32_t>; using L = Layout<int32_t>;
...@@ -370,6 +421,27 @@ TEST(Layout, Sizes) { ...@@ -370,6 +421,27 @@ TEST(Layout, Sizes) {
} }
} }
// Size<N>() / Size<T>() / Sizes() must report the run-time sizes supplied to
// Partial() or the constructor when no sizes are static.
//
// Scalar equality checks use EXPECT_EQ — consistent with StaticAllocSize
// above and clearer on failure than EXPECT_THAT's implicit value->matcher
// conversion; the tuple-valued Sizes() keeps EXPECT_THAT + ElementsAre.
TEST(Layout, StaticSize) {
  using L = Layout<int8_t, int32_t, Int128>;
  {
    using SL = L::WithStaticSizes<>;
    // With no sizes bound at all, Sizes() is empty.
    EXPECT_THAT(SL::Partial().Sizes(), ElementsAre());
    EXPECT_EQ(3, SL::Partial(3).Size<0>());
    EXPECT_EQ(3, SL::Partial(3).Size<int8_t>());
    EXPECT_THAT(SL::Partial(3).Sizes(), ElementsAre(3));
    EXPECT_EQ(3, SL::Partial(3, 5, 7).Size<0>());
    EXPECT_EQ(3, SL::Partial(3, 5, 7).Size<int8_t>());
    EXPECT_EQ(7, SL::Partial(3, 5, 7).Size<2>());
    EXPECT_EQ(7, SL::Partial(3, 5, 7).Size<Int128>());
    EXPECT_THAT(SL::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
    EXPECT_EQ(3, SL(3, 5, 7).Size<0>());
    EXPECT_EQ(3, SL(3, 5, 7).Size<int8_t>());
    EXPECT_EQ(7, SL(3, 5, 7).Size<2>());
    EXPECT_EQ(7, SL(3, 5, 7).Size<Int128>());
    EXPECT_THAT(SL(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
  }
}
TEST(Layout, PointerByIndex) { TEST(Layout, PointerByIndex) {
alignas(max_align_t) const unsigned char p[100] = {0}; alignas(max_align_t) const unsigned char p[100] = {0};
{ {
...@@ -720,6 +792,61 @@ TEST(Layout, MutablePointers) { ...@@ -720,6 +792,61 @@ TEST(Layout, MutablePointers) {
} }
} }
// Verifies Pointer<N>() and Pointers() when leading array sizes are fixed at
// compile time. A size supplied via WithStaticSizes<...> must behave exactly
// like the same size passed at run time to Partial(): paired scopes below
// exercise both spellings and expect identical pointer tuples and types.
TEST(Layout, StaticPointers) {
  alignas(max_align_t) const unsigned char p[100] = {0};
  using L = Layout<int8_t, int8_t, Int128>;
  {
    // No sizes known: only the first array's pointer is accessible.
    const auto x = L::WithStaticSizes<>::Partial();
    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
              Type<std::tuple<const int8_t*>>(x.Pointers(p)));
  }
  {
    // First size dynamic...
    const auto x = L::WithStaticSizes<>::Partial(1);
    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
              (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
  }
  {
    // ...and the same size static must yield the same two pointers.
    const auto x = L::WithStaticSizes<1>::Partial();
    EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
              (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
  }
  {
    // All three sizes dynamic.
    const auto x = L::WithStaticSizes<>::Partial(1, 2, 3);
    EXPECT_EQ(
        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
            x.Pointers(p))));
  }
  {
    // One size static, two dynamic.
    const auto x = L::WithStaticSizes<1>::Partial(2, 3);
    EXPECT_EQ(
        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
            x.Pointers(p))));
  }
  {
    // Two sizes static, one dynamic.
    const auto x = L::WithStaticSizes<1, 2>::Partial(3);
    EXPECT_EQ(
        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
            x.Pointers(p))));
  }
  {
    // All sizes static, via Partial().
    const auto x = L::WithStaticSizes<1, 2, 3>::Partial();
    EXPECT_EQ(
        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
            x.Pointers(p))));
  }
  {
    // All sizes static, via a default-constructed (non-partial) layout.
    const L::WithStaticSizes<1, 2, 3> x;
    EXPECT_EQ(
        std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
        (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
            x.Pointers(p))));
  }
}
TEST(Layout, SliceByIndexSize) { TEST(Layout, SliceByIndexSize) {
alignas(max_align_t) const unsigned char p[100] = {0}; alignas(max_align_t) const unsigned char p[100] = {0};
{ {
...@@ -769,7 +896,6 @@ TEST(Layout, SliceByTypeSize) { ...@@ -769,7 +896,6 @@ TEST(Layout, SliceByTypeSize) {
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size()); EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
} }
} }
TEST(Layout, MutableSliceByIndexSize) { TEST(Layout, MutableSliceByIndexSize) {
alignas(max_align_t) unsigned char p[100] = {0}; alignas(max_align_t) unsigned char p[100] = {0};
{ {
...@@ -820,6 +946,39 @@ TEST(Layout, MutableSliceByTypeSize) { ...@@ -820,6 +946,39 @@ TEST(Layout, MutableSliceByTypeSize) {
} }
} }
// Slice<N>(ptr).size() must report statically-bound sizes (3 and 5) even when
// no run-time sizes were supplied, and the dynamically-supplied size (7) for
// the last array. Checked for both const and mutable backing storage.
TEST(Layout, StaticSliceSize) {
  alignas(max_align_t) const unsigned char cp[100] = {0};
  alignas(max_align_t) unsigned char p[100] = {0};
  using L = Layout<int8_t, int32_t, Int128>;
  using SL = L::WithStaticSizes<3, 5>;

  // Const storage: static sizes are available with and without Partial args.
  EXPECT_EQ(3, SL::Partial().Slice<0>(cp).size());
  EXPECT_EQ(3, SL::Partial().Slice<int8_t>(cp).size());
  EXPECT_EQ(3, SL::Partial(7).Slice<0>(cp).size());
  EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(cp).size());
  EXPECT_EQ(5, SL::Partial().Slice<1>(cp).size());
  EXPECT_EQ(5, SL::Partial().Slice<int32_t>(cp).size());
  EXPECT_EQ(5, SL::Partial(7).Slice<1>(cp).size());
  EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(cp).size());
  EXPECT_EQ(7, SL::Partial(7).Slice<2>(cp).size());
  EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(cp).size());

  // Same expectations against mutable storage.
  EXPECT_EQ(3, SL::Partial().Slice<0>(p).size());
  EXPECT_EQ(3, SL::Partial().Slice<int8_t>(p).size());
  EXPECT_EQ(3, SL::Partial(7).Slice<0>(p).size());
  EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(p).size());
  EXPECT_EQ(5, SL::Partial().Slice<1>(p).size());
  EXPECT_EQ(5, SL::Partial().Slice<int32_t>(p).size());
  EXPECT_EQ(5, SL::Partial(7).Slice<1>(p).size());
  EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(p).size());
  EXPECT_EQ(7, SL::Partial(7).Slice<2>(p).size());
  EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(p).size());
}
TEST(Layout, SliceByIndexData) { TEST(Layout, SliceByIndexData) {
alignas(max_align_t) const unsigned char p[100] = {0}; alignas(max_align_t) const unsigned char p[100] = {0};
{ {
...@@ -1230,6 +1389,39 @@ TEST(Layout, MutableSliceByTypeData) { ...@@ -1230,6 +1389,39 @@ TEST(Layout, MutableSliceByTypeData) {
} }
} }
// Slice<N>(ptr).data() must land at the expected byte offsets (0, 4, 24)
// when the first two array sizes are static, with or without the run-time
// size for the last array. Checked for const and mutable backing storage.
TEST(Layout, StaticSliceData) {
  alignas(max_align_t) const unsigned char cp[100] = {0};
  alignas(max_align_t) unsigned char p[100] = {0};
  using L = Layout<int8_t, int32_t, Int128>;
  using SL = L::WithStaticSizes<3, 5>;

  // Const storage.
  EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<0>(cp).data()));
  EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<int8_t>(cp).data()));
  EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<0>(cp).data()));
  EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<int8_t>(cp).data()));
  EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<1>(cp).data()));
  EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<int32_t>(cp).data()));
  EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<1>(cp).data()));
  EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<int32_t>(cp).data()));
  EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<2>(cp).data()));
  EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<Int128>(cp).data()));

  // Mutable storage.
  EXPECT_EQ(0, Distance(p, SL::Partial().Slice<0>(p).data()));
  EXPECT_EQ(0, Distance(p, SL::Partial().Slice<int8_t>(p).data()));
  EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<0>(p).data()));
  EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<int8_t>(p).data()));
  EXPECT_EQ(4, Distance(p, SL::Partial().Slice<1>(p).data()));
  EXPECT_EQ(4, Distance(p, SL::Partial().Slice<int32_t>(p).data()));
  EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<1>(p).data()));
  EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<int32_t>(p).data()));
  EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<2>(p).data()));
  EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<Int128>(p).data()));
}
MATCHER_P(IsSameSlice, slice, "") { MATCHER_P(IsSameSlice, slice, "") {
return arg.size() == slice.size() && arg.data() == slice.data(); return arg.size() == slice.size() && arg.data() == slice.data();
} }
...@@ -1339,6 +1531,43 @@ TEST(Layout, MutableSlices) { ...@@ -1339,6 +1531,43 @@ TEST(Layout, MutableSlices) {
} }
} }
// Slices() must return spans matching the individual Slice<N>() results when
// the first two sizes are static. Each scope checks both the const and the
// mutable overload; the span element types verify const propagation.
TEST(Layout, StaticSlices) {
  alignas(max_align_t) const unsigned char cp[100] = {0};
  alignas(max_align_t) unsigned char p[100] = {0};
  using SL = Layout<int8_t, int8_t, Int128>::WithStaticSizes<1, 2>;
  {
    // No run-time sizes: only the two statically-sized arrays are sliceable.
    const auto x = SL::Partial();
    EXPECT_THAT(
        (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(
            x.Slices(cp))),
        Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp))));
    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
  }
  {
    // Last size supplied at run time: all three slices are available.
    const auto x = SL::Partial(3);
    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
                                 Span<const Int128>>>(x.Slices(cp))),
                Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
                      IsSameSlice(x.Slice<2>(cp))));
    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
                    x.Slices(p))),
                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
                      IsSameSlice(x.Slice<2>(p))));
  }
  {
    // Same, but through the non-partial constructor.
    const SL x(3);
    EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
                                 Span<const Int128>>>(x.Slices(cp))),
                Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
                      IsSameSlice(x.Slice<2>(cp))));
    EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
                    x.Slices(p))),
                Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
                      IsSameSlice(x.Slice<2>(p))));
  }
}
TEST(Layout, UnalignedTypes) { TEST(Layout, UnalignedTypes) {
constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3); constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
...@@ -1377,6 +1606,36 @@ TEST(Layout, Alignment) { ...@@ -1377,6 +1606,36 @@ TEST(Layout, Alignment) {
static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, ""); static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, ""); static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, ""); static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
}
TEST(Layout, StaticAlignment) {
static_assert(Layout<int8_t>::WithStaticSizes<>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<0>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<7>::Alignment() == 1, "");
static_assert(Layout<int32_t>::WithStaticSizes<>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<0>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<3>::Alignment() == 4, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<0>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<>::Alignment() == 8, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<0, 0, 0>::Alignment() ==
8,
"");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<1, 1, 1>::Alignment() ==
8,
"");
} }
TEST(Layout, ConstexprPartial) { TEST(Layout, ConstexprPartial) {
...@@ -1384,6 +1643,15 @@ TEST(Layout, ConstexprPartial) { ...@@ -1384,6 +1643,15 @@ TEST(Layout, ConstexprPartial) {
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3); constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
} }
// With both array sizes bound as template parameters, a WithStaticSizes
// layout is default-constructible in a constexpr context and the offset of
// the second (over-aligned) array is a compile-time constant.
TEST(Layout, StaticConstexpr) {
  constexpr size_t kMaxAlign = alignof(max_align_t);
  using SL = Layout<unsigned char, Aligned<unsigned char, 2 * kMaxAlign>>::
      WithStaticSizes<1, 3>;
  constexpr SL layout;
  static_assert(layout.Offset<1>() == 2 * kMaxAlign, "");
}
// [from, to) // [from, to)
struct Region { struct Region {
size_t from; size_t from;
...@@ -1458,6 +1726,41 @@ TEST(Layout, PoisonPadding) { ...@@ -1458,6 +1726,41 @@ TEST(Layout, PoisonPadding) {
} }
} }
// PoisonPadding() with statically-bound leading sizes must poison the same
// padding regions as the fully-dynamic layout: byte [1,8) between the int8_t
// and the Int64, and [36,40) between the int32_t and the Int128 once the
// third size is known. Comparing Slices(c) with itself afterwards checks
// that the element regions themselves remain readable (i.e. were not
// poisoned) — presumably relied on by the sanitizer checks in
// ExpectPoisoned; confirm against its definition above.
TEST(Layout, StaticPoisonPadding) {
  using L = Layout<int8_t, Int64, int32_t, Int128>;
  using SL = L::WithStaticSizes<1, 2>;
  // Allocation size of the full layout with sizes (1, 2, 3, 4).
  constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
  {
    // Only the two static sizes are known.
    constexpr auto x = SL::Partial();
    alignas(max_align_t) const unsigned char c[n] = {};
    x.PoisonPadding(c);
    EXPECT_EQ(x.Slices(c), x.Slices(c));
    ExpectPoisoned(c, {{1, 8}});
  }
  {
    // Third size supplied: the padding before the Int128 is also poisoned.
    constexpr auto x = SL::Partial(3);
    alignas(max_align_t) const unsigned char c[n] = {};
    x.PoisonPadding(c);
    EXPECT_EQ(x.Slices(c), x.Slices(c));
    ExpectPoisoned(c, {{1, 8}, {36, 40}});
  }
  {
    // All four sizes known via Partial.
    constexpr auto x = SL::Partial(3, 4);
    alignas(max_align_t) const unsigned char c[n] = {};
    x.PoisonPadding(c);
    EXPECT_EQ(x.Slices(c), x.Slices(c));
    ExpectPoisoned(c, {{1, 8}, {36, 40}});
  }
  {
    // All four sizes known via the non-partial constructor.
    constexpr SL x(3, 4);
    alignas(max_align_t) const unsigned char c[n] = {};
    x.PoisonPadding(c);
    EXPECT_EQ(x.Slices(c), x.Slices(c));
    ExpectPoisoned(c, {{1, 8}, {36, 40}});
  }
}
TEST(Layout, DebugString) { TEST(Layout, DebugString) {
{ {
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(); constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
...@@ -1500,6 +1803,62 @@ TEST(Layout, DebugString) { ...@@ -1500,6 +1803,62 @@ TEST(Layout, DebugString) {
} }
} }
// DebugString() must produce the same text whether a given array size is
// supplied statically (WithStaticSizes<...>) or dynamically (Partial(...)).
// Each scope pairs a dynamic spelling with its static equivalent.
TEST(Layout, StaticDebugString) {
  {
    // No sizes at all: only the first array's offset/type appear.
    constexpr auto x =
        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial();
    EXPECT_EQ("@0<signed char>(1)", x.DebugString());
  }
  {
    // First size dynamic...
    constexpr auto x =
        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1);
    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
  }
  {
    // ...versus static: identical output.
    constexpr auto x =
        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial();
    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
  }
  {
    // Two dynamic sizes...
    constexpr auto x =
        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1,
                                                                            2);
    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
              x.DebugString());
  }
  {
    // ...one static + one dynamic...
    constexpr auto x =
        Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial(2);
    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
              x.DebugString());
  }
  {
    // ...both static: all three identical.
    constexpr auto x = Layout<int8_t, int32_t, int8_t,
                              Int128>::WithStaticSizes<1, 2>::Partial();
    EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
              x.DebugString());
  }
  {
    // All four sizes static via Partial().
    constexpr auto x = Layout<int8_t, int32_t, int8_t,
                              Int128>::WithStaticSizes<1, 2, 3, 4>::Partial();
    EXPECT_EQ(
        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
        "@16" +
            Int128::Name() + "(16)[4]",
        x.DebugString());
  }
  {
    // All four sizes static via the default constructor.
    constexpr Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1, 2, 3,
                                                                       4>
        x;
    EXPECT_EQ(
        "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
        "@16" +
            Int128::Name() + "(16)[4]",
        x.DebugString());
  }
}
TEST(Layout, CharTypes) { TEST(Layout, CharTypes) {
constexpr Layout<int32_t> x(1); constexpr Layout<int32_t> x(1);
alignas(max_align_t) char c[x.AllocSize()] = {}; alignas(max_align_t) char c[x.AllocSize()] = {};
...@@ -1638,6 +1997,35 @@ TEST(CompactString, Works) { ...@@ -1638,6 +1997,35 @@ TEST(CompactString, Works) {
EXPECT_STREQ("hello", s.c_str()); EXPECT_STREQ("hello", s.c_str());
} }
// Same as the previous CompactString example, except we set the first array
// size to 1 statically, since we know it is always 1. This allows us to compute
// the offset of the character array at compile time.
class StaticCompactString {
 public:
  StaticCompactString(const char* s = "") {  // NOLINT
    const size_t len = strlen(s);
    // The char array also stores the terminating NUL, hence len + 1.
    const SL layout(len + 1);
    p_.reset(new unsigned char[layout.AllocSize()]);
    unsigned char* const base = p_.get();
    layout.PoisonPadding(base);
    *layout.Pointer<size_t>(base) = len;
    memcpy(layout.Pointer<char>(base), s, len + 1);
  }

  // Stored string length, read from the size_t header field.
  size_t size() const { return *SL::Partial().Pointer<size_t>(p_.get()); }

  // NUL-terminated contents. The char array's offset is a compile-time
  // constant because the size_t array's length (1) is a static size.
  const char* c_str() const { return SL::Partial().Pointer<char>(p_.get()); }

 private:
  using SL = Layout<size_t, char>::WithStaticSizes<1>;

  std::unique_ptr<unsigned char[]> p_;
};
// Smoke test: StaticCompactString round-trips the contents and length of the
// string it was constructed from.
TEST(StaticCompactString, Works) {
  StaticCompactString s = "hello";
  EXPECT_EQ(5, s.size());
  EXPECT_STREQ("hello", s.c_str());
}
} // namespace example } // namespace example
} // namespace } // namespace
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment