Export of internal Abseil changes.
-- c321829735accc2e6beb81e6a5a4421e5647b876 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::swap(InlinedVector&) to be exception safe and adds exception safety tests

PiperOrigin-RevId: 255511536

-- 0d86445891748efb09430eb9ede267b54185a246 by CJ Johnson <johnsoncj@google.com>:

Updates the definition of InlinedVector::erase(...) to be exception safe and adds an exception safety test for it.

PiperOrigin-RevId: 255492671

-- f07e8fa62dfe9eb0d025b27fca8c6db43c5a328f by CJ Johnson <johnsoncj@google.com>:

Updates the implementation of InlinedVector::emplace_back(...) to be exception safe and adds exception safety tests

PiperOrigin-RevId: 255422837

-- 4c3be92bfe4c1636a03cef8fd5aa802fed0d2c61 by Abseil Team <absl-team@google.com>:

Internal Change

PiperOrigin-RevId: 255422693

-- 6df38ea42f00678c357a539016163f8ac4c084e6 by Gennadiy Rozental <rogeeff@google.com>:

Introduce public interfaces for setting and getting program usage messages.

PiperOrigin-RevId: 255291467

-- 8f21d594aed3971d37db70226847c693eb548edb by Laramie Leavitt <lar@google.com>:

Move absl/random's copy of ABSL_ATTRIBUTE_FORCE_INLINE and ABSL_ATTRIBUTE_NEVER_INLINE into .cc files and rename to prevent conflicts.

https://github.com/abseil/abseil-cpp/issues/343

PiperOrigin-RevId: 255288599

-- 6b7430ad0c8bd860fb9394894f5eeedd1acc9f77 by CJ Johnson <johnsoncj@google.com>:

Updates the ScopedAllocatorWorks test for InlinedVector to not rely on the byte count allocated by the standard library. In doing so, removes the LegacyNextCapacityFrom(...) impl function from InlinedVector. Also applies clang-format to the test file.

PiperOrigin-RevId: 255207606

GitOrigin-RevId: c321829735accc2e6beb81e6a5a4421e5647b876
Change-Id: I7438211c36c4549fca2e866658f8d579c65d7d52
Parent: 72e09a54d9
Commit: c964fcffac
16 changed files with 518 additions and 367 deletions
absl/container/inlined_vector.h:

@@ -640,28 +640,7 @@ class InlinedVector {
   // returning a `reference` to the emplaced element.
   template <typename... Args>
   reference emplace_back(Args&&... args) {
-    size_type s = size();
-    if (ABSL_PREDICT_FALSE(s == capacity())) {
-      size_type new_capacity = 2 * capacity();
-      pointer new_data =
-          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-      reference new_element =
-          Construct(new_data + s, std::forward<Args>(args)...);
-      UninitializedCopy(std::make_move_iterator(data()),
-                        std::make_move_iterator(data() + s), new_data);
-      ResetAllocation(new_data, new_capacity, s + 1);
-      return new_element;
-    } else {
-      pointer space;
-      if (storage_.GetIsAllocated()) {
-        storage_.SetAllocatedSize(s + 1);
-        space = storage_.GetAllocatedData();
-      } else {
-        storage_.SetInlinedSize(s + 1);
-        space = storage_.GetInlinedData();
-      }
-      return Construct(space + s, std::forward<Args>(args)...);
-    }
+    return storage_.EmplaceBack(std::forward<Args>(args)...);
   }

   // `InlinedVector::push_back()`
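The rewritten emplace_back() delegates all growth and construction logic to Storage::EmplaceBack() (added later in this diff), which is written to give the strong exception guarantee: if the element constructor throws, the vector is observably unchanged. A minimal sketch of that contract from a caller's point of view (illustrative only, not part of this commit; Thrower is a hypothetical type):

    #include <cassert>
    #include <stdexcept>

    #include "absl/container/inlined_vector.h"

    struct Thrower {
      Thrower() { throw std::runtime_error("boom"); }
    };

    int main() {
      absl::InlinedVector<Thrower, 4> v;
      try {
        v.emplace_back();  // The element constructor throws mid-operation...
      } catch (const std::runtime_error&) {
        assert(v.empty());  // ...and the vector is left in its previous state.
      }
    }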
@@ -696,10 +675,7 @@ class InlinedVector {
     assert(pos >= begin());
     assert(pos < end());

-    iterator position = const_cast<iterator>(pos);
-    std::move(position + 1, end(), position);
-    pop_back();
-    return position;
+    return storage_.Erase(pos, pos + 1);
   }

   // Overload of `InlinedVector::erase()` for erasing all elements in the
@@ -707,28 +683,15 @@ class InlinedVector {
   // to the first element following the range erased or the end iterator if `to`
   // was the end iterator.
   iterator erase(const_iterator from, const_iterator to) {
-    assert(begin() <= from);
+    assert(from >= begin());
     assert(from <= to);
     assert(to <= end());

-    iterator range_start = const_cast<iterator>(from);
-    iterator range_end = const_cast<iterator>(to);
-
-    size_type s = size();
-    ptrdiff_t erase_gap = std::distance(range_start, range_end);
-    if (erase_gap > 0) {
-      pointer space;
-      if (storage_.GetIsAllocated()) {
-        space = storage_.GetAllocatedData();
-        storage_.SetAllocatedSize(s - erase_gap);
-      } else {
-        space = storage_.GetInlinedData();
-        storage_.SetInlinedSize(s - erase_gap);
-      }
-      std::move(range_end, space + s, range_start);
-      Destroy(space + s - erase_gap, space + s);
+    if (ABSL_PREDICT_TRUE(from != to)) {
+      return storage_.Erase(from, to);
+    } else {
+      return const_cast<iterator>(from);
     }
-    return range_start;
   }

   // `InlinedVector::clear()`
@@ -774,96 +737,9 @@ class InlinedVector {
   //
   // Swaps the contents of this inlined vector with the contents of `other`.
   void swap(InlinedVector& other) {
-    using std::swap;
-
-    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) {
-      return;
+    if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+      storage_.Swap(std::addressof(other.storage_));
     }
-
-    bool is_allocated = storage_.GetIsAllocated();
-    bool other_is_allocated = other.storage_.GetIsAllocated();
-
-    if (is_allocated && other_is_allocated) {
-      // Both out of line, so just swap the tag, allocation, and allocator.
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      return;
-    }
-
-    if (!is_allocated && !other_is_allocated) {
-      // Both inlined: swap up to smaller size, then move remaining elements.
-      InlinedVector* a = this;
-      InlinedVector* b = std::addressof(other);
-      if (size() < other.size()) {
-        swap(a, b);
-      }
-
-      const size_type a_size = a->size();
-      const size_type b_size = b->size();
-      assert(a_size >= b_size);
-      // `a` is larger. Swap the elements up to the smaller array size.
-      std::swap_ranges(a->storage_.GetInlinedData(),
-                       a->storage_.GetInlinedData() + b_size,
-                       b->storage_.GetInlinedData());
-
-      // Move the remaining elements:
-      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
-      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
-                           a->storage_.GetInlinedData() + a_size,
-                           b->storage_.GetInlinedData() + b_size);
-      a->Destroy(a->storage_.GetInlinedData() + b_size,
-                 a->storage_.GetInlinedData() + a_size);
-
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      assert(b->size() == a_size);
-      assert(a->size() == b_size);
-      return;
-    }
-
-    // One is out of line, one is inline.
-    // We first move the elements from the inlined vector into the
-    // inlined space in the other vector. We then put the other vector's
-    // pointer/capacity into the originally inlined vector and swap
-    // the tags.
-    InlinedVector* a = this;
-    InlinedVector* b = std::addressof(other);
-    if (a->storage_.GetIsAllocated()) {
-      swap(a, b);
-    }
-
-    assert(!a->storage_.GetIsAllocated());
-    assert(b->storage_.GetIsAllocated());
-
-    const size_type a_size = a->size();
-    const size_type b_size = b->size();
-    // In an optimized build, `b_size` would be unused.
-    static_cast<void>(b_size);
-
-    // Made local copies of `size()`, these can now be swapped
-    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
-
-    // Copy out before `b`'s union gets clobbered by `inline_space`
-    pointer b_data = b->storage_.GetAllocatedData();
-    size_type b_capacity = b->storage_.GetAllocatedCapacity();
-
-    b->UninitializedCopy(a->storage_.GetInlinedData(),
-                         a->storage_.GetInlinedData() + a_size,
-                         b->storage_.GetInlinedData());
-    a->Destroy(a->storage_.GetInlinedData(),
-               a->storage_.GetInlinedData() + a_size);
-
-    a->storage_.SetAllocatedData(b_data, b_capacity);
-
-    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
-      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
-    }
-
-    assert(b->size() == a_size);
-    assert(a->size() == b_size);
   }

  private:
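Behaviorally, the delegation preserves swap's semantics across representations: either vector may be inline or heap-allocated, and contents are exchanged in all combinations. A small usage sketch (illustrative, not from the commit):

    #include <cassert>

    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<int, 4> a = {1, 2};           // stored inline
      absl::InlinedVector<int, 4> b = {1, 2, 3, 4, 5};  // spilled to the heap
      a.swap(b);
      assert(a.size() == 5);  // contents exchanged
      assert(b.size() == 2);
    }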
absl/container/inlined_vector_exception_safety_test.cc:

@@ -279,12 +279,34 @@ TYPED_TEST(TwoSizeTest, Resize) {
   }));
 }

+TYPED_TEST(OneSizeTest, EmplaceBack) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  VecT full_vec{size};
+  full_vec.resize(full_vec.capacity());
+
+  VecT nonfull_vec{size};
+  nonfull_vec.reserve(size + 1);
+
+  auto tester = testing::MakeExceptionSafetyTester().WithContracts(
+      InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) {
+    vec->emplace_back();  //
+  }));
+
+  EXPECT_TRUE(tester.WithInitialValue(full_vec).Test([](VecT* vec) {
+    vec->emplace_back();  //
+  }));
+}
+
 TYPED_TEST(OneSizeTest, PopBack) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);

   auto tester = testing::MakeExceptionSafetyTester()
-                    .WithInitialValue(VecT(size))
+                    .WithInitialValue(VecT{size})
                     .WithContracts(NoThrowGuarantee<VecT>);

   EXPECT_TRUE(tester.Test([](VecT* vec) {
@@ -292,12 +314,47 @@ TYPED_TEST(OneSizeTest, PopBack) {
   }));
 }

+TYPED_TEST(OneSizeTest, Erase) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it, it + 1);
+  }));
+}
+
 TYPED_TEST(OneSizeTest, Clear) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);

   auto tester = testing::MakeExceptionSafetyTester()
-                    .WithInitialValue(VecT(size))
+                    .WithInitialValue(VecT{size})
                     .WithContracts(NoThrowGuarantee<VecT>);

   EXPECT_TRUE(tester.Test([](VecT* vec) {
@@ -332,4 +389,25 @@ TYPED_TEST(OneSizeTest, ShrinkToFit) {
   }));
 }

+TYPED_TEST(TwoSizeTest, Swap) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    vec->swap(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    using std::swap;
+    VecT other_vec{to_size};
+    swap(*vec, other_vec);
+  }));
+}
+
 }  // namespace
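A contract passed to WithContracts is any callable that takes the container under test and returns testing::AssertionResult; the tester induces a throw at every throw point in the tested operation and re-checks the contracts each time. The InlinedVectorInvariants and NoThrowGuarantee contracts used above are defined earlier in this test file and are not shown in this diff; a hedged sketch of the general shape of such a contract (simplified, the real invariants check more than this):

    #include "gtest/gtest.h"

    // Illustrative only: a minimal contract in the style used by this test.
    template <typename VecT>
    testing::AssertionResult SizeWithinCapacity(VecT* vec) {
      if (vec->size() > vec->capacity()) {
        return testing::AssertionFailure()
               << "size " << vec->size() << " exceeds capacity "
               << vec->capacity();
      }
      return testing::AssertionSuccess();
    }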
absl/container/inlined_vector_test.cc:

@@ -76,12 +76,9 @@ TYPED_TEST_SUITE_P(InstanceTest);
 // destroyed in the erase(begin, end) test.
 class RefCounted {
  public:
-  RefCounted(int value, int* count) : value_(value), count_(count) {
-    Ref();
-  }
+  RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }

-  RefCounted(const RefCounted& v)
-      : value_(v.value_), count_(v.count_) {
+  RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
     Ref();
   }

@@ -290,7 +287,7 @@ TEST(RefCountedVec, EraseBeginEnd) {
   }

   // Check that the elements at the end are preserved.
-  for (int i = erase_end; i< len; ++i) {
+  for (int i = erase_end; i < len; ++i) {
     EXPECT_EQ(1, counts[i]);
   }
 }
@@ -552,10 +549,10 @@ TEST(IntVec, Resize) {
     static const int kResizeElem = 1000000;
     for (int k = 0; k < 10; k++) {
       // Enlarging resize
-      v.resize(len+k, kResizeElem);
-      EXPECT_EQ(len+k, v.size());
-      EXPECT_LE(len+k, v.capacity());
-      for (int i = 0; i < len+k; i++) {
+      v.resize(len + k, kResizeElem);
+      EXPECT_EQ(len + k, v.size());
+      EXPECT_LE(len + k, v.capacity());
+      for (int i = 0; i < len + k; i++) {
         if (i < len) {
           EXPECT_EQ(i, v[i]);
         } else {
@@ -866,7 +863,7 @@ TYPED_TEST_P(InstanceTest, Swap) {
       auto min_len = std::min(l1, l2);
       auto max_len = std::max(l1, l2);
       for (int i = 0; i < l1; i++) a.push_back(Instance(i));
-      for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
+      for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
       EXPECT_EQ(tracker.instances(), l1 + l2);
       tracker.ResetCopiesMovesSwaps();
       {
@@ -934,7 +931,7 @@ TEST(IntVec, EqualAndNotEqual) {
     EXPECT_FALSE(a == b);
     EXPECT_TRUE(a != b);

-    b[i] = b[i] - 1; // Back to before
+    b[i] = b[i] - 1;  // Back to before
     EXPECT_TRUE(a == b);
     EXPECT_FALSE(a != b);
   }
@@ -1001,7 +998,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {

   // reserve() must not increase the number of initialized objects
   SCOPED_TRACE("reserve");
-  v.reserve(len+1000);
+  v.reserve(len + 1000);
   EXPECT_EQ(tracker.instances(), len);
   EXPECT_EQ(tracker.copies() + tracker.moves(), len);
@@ -1247,9 +1244,8 @@ void InstanceCountElemAssignWithAllocationTest() {
     absl::InlinedVector<Instance, 2> v(original_contents.begin(),
                                        original_contents.end());
     v.assign(3, Instance(123));
-    EXPECT_THAT(v,
-                AllOf(SizeIs(3),
-                      ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
+    EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
+                                                ValueIs(123))));
     EXPECT_LE(v.size(), v.capacity());
   }
 }
@@ -1528,8 +1524,8 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
     SCOPED_TRACE(original_size);
     absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
     v.assign({Instance(3), Instance(4), Instance(5)});
-    EXPECT_THAT(v, AllOf(SizeIs(3),
-                         ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+    EXPECT_THAT(
+        v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
     EXPECT_LE(3, v.capacity());
   }
 }
@@ -1554,7 +1550,7 @@ TEST(DynamicVec, DynamicVecCompiles) {
 TEST(AllocatorSupportTest, Constructors) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
@@ -1570,7 +1566,7 @@ TEST(AllocatorSupportTest, Constructors) {
 TEST(AllocatorSupportTest, CountAllocations) {
   using MyAlloc = CountingAllocator<int>;
   using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
-  const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+  const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
   int64_t allocated = 0;
   MyAlloc alloc(&allocated);
   {
@@ -1634,8 +1630,8 @@ TEST(AllocatorSupportTest, SwapBothAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
-    const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1659,8 +1655,8 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
   int64_t allocated1 = 0;
   int64_t allocated2 = 0;
   {
-    const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
-    const int ia2[] = { 0, 1, 2, 3 };
+    const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+    const int ia2[] = {0, 1, 2, 3};
     MyAlloc a1(&allocated1);
     MyAlloc a2(&allocated2);
     AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1681,65 +1677,42 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {

 TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
   using StdVector = std::vector<int, CountingAllocator<int>>;
-  using MyAlloc =
-      std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
-  using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
+  using Alloc = CountingAllocator<StdVector>;
+  using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+  using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;

-  // MSVC 2017's std::vector allocates different amounts of memory in debug
-  // versus opt mode.
-  int64_t test_allocated = 0;
-  StdVector v(CountingAllocator<int>{&test_allocated});
-  // The amount of memory allocated by a default constructed vector<int>
-  auto default_std_vec_allocated = test_allocated;
-  v.push_back(1);
-  // The amount of memory allocated by a copy-constructed vector<int> with one
-  // element.
-  int64_t one_element_std_vec_copy_allocated = test_allocated;
+  {
+    int64_t total_allocated_byte_count = 0;

-  int64_t allocated = 0;
-  AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
-  EXPECT_EQ(allocated, 0);
+    AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+    inlined_case.emplace_back();

-  // This default constructs a vector<int>, but the allocator should pass itself
-  // into the vector<int>, so check allocation compared to that.
-  // The absl::InlinedVector does not allocate any memory.
-  // The vector<int> may allocate any memory.
-  auto expected = default_std_vec_allocated;
-  vec.resize(1);
-  EXPECT_EQ(allocated, expected);
+    int64_t absl_responsible_for_count = total_allocated_byte_count;
+    EXPECT_EQ(absl_responsible_for_count, 0);

-  // We make vector<int> allocate memory.
-  // It must go through the allocator even though we didn't construct the
-  // vector directly. This assumes that vec[0] doesn't need to grow its
-  // allocation.
-  expected += sizeof(int);
-  vec[0].push_back(1);
-  EXPECT_EQ(allocated, expected);
+    inlined_case[0].emplace_back();
+    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);

-  // Another allocating vector.
-  expected += one_element_std_vec_copy_allocated;
-  vec.push_back(vec[0]);
-  EXPECT_EQ(allocated, expected);
+    inlined_case.clear();
+    EXPECT_EQ(total_allocated_byte_count, 0);
+  }

-  // Overflow the inlined memory.
-  // The absl::InlinedVector will now allocate.
-  expected += sizeof(StdVector) * 8 + default_std_vec_allocated * 3;
-  vec.resize(5);
-  EXPECT_EQ(allocated, expected);
+  {
+    int64_t total_allocated_byte_count = 0;

-  // Adding one more in external mode should also work.
-  expected += one_element_std_vec_copy_allocated;
-  vec.push_back(vec[0]);
-  EXPECT_EQ(allocated, expected);
+    AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+    allocated_case.emplace_back();
+    allocated_case.emplace_back();

-  // And extending these should still work. This assumes that vec[0] does not
-  // need to grow its allocation.
-  expected += sizeof(int);
-  vec[0].push_back(1);
-  EXPECT_EQ(allocated, expected);
+    int64_t absl_responsible_for_count = total_allocated_byte_count;
+    EXPECT_GT(absl_responsible_for_count, 0);

-  vec.clear();
-  EXPECT_EQ(allocated, 0);
+    allocated_case[1].emplace_back();
+    EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+    allocated_case.clear();
+    EXPECT_EQ(total_allocated_byte_count, 0);
+  }
 }

 TEST(AllocatorSupportTest, SizeAllocConstructor) {
absl/container/internal/inlined_vector.h:

@@ -364,16 +364,6 @@ class Storage {
     allocation_tx_ptr->GetCapacity() = 0;
   }

-  void SwapSizeAndIsAllocated(Storage* other) {
-    using std::swap;
-    swap(GetSizeAndIsAllocated(), other->GetSizeAndIsAllocated());
-  }
-
-  void SwapAllocatedSizeAndCapacity(Storage* other) {
-    using std::swap;
-    swap(data_.allocated, other->data_.allocated);
-  }
-
   void MemcpyFrom(const Storage& other_storage) {
     assert(IsMemcpyOk::value || other_storage.GetIsAllocated());

@@ -390,10 +380,17 @@ class Storage {
   template <typename ValueAdapter>
   void Resize(ValueAdapter values, size_type new_size);

+  template <typename... Args>
+  reference EmplaceBack(Args&&... args);
+
+  iterator Erase(const_iterator from, const_iterator to);
+
   void Reserve(size_type requested_capacity);

   void ShrinkToFit();

+  void Swap(Storage* other_storage_ptr);
+
  private:
   size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }

@@ -401,14 +398,8 @@ class Storage {
     return metadata_.template get<1>();
   }

-  static size_type LegacyNextCapacityFrom(size_type current_capacity,
-                                          size_type requested_capacity) {
-    // TODO(johnsoncj): Get rid of this old behavior.
-    size_type new_capacity = current_capacity;
-    while (new_capacity < requested_capacity) {
-      new_capacity *= 2;
-    }
-    return new_capacity;
-  }
+  static size_type NextCapacityFrom(size_type current_capacity) {
+    return current_capacity * 2;
+  }

   using Metadata =
@@ -521,8 +512,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   absl::Span<value_type> destroy_loop;

   if (new_size > storage_view.capacity) {
-    pointer new_data = allocation_tx.Allocate(
-        LegacyNextCapacityFrom(storage_view.capacity, new_size));
+    pointer new_data = allocation_tx.Allocate(new_size);

     // Construct new objects in `new_data`
     construct_loop = {new_data + storage_view.size,
@@ -562,6 +552,75 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   SetSize(new_size);
 }

+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  pointer construct_data =
+      (storage_view.size == storage_view.capacity
+           ? allocation_tx.Allocate(NextCapacityFrom(storage_view.capacity))
+           : storage_view.data);
+
+  pointer last_ptr = construct_data + storage_view.size;
+  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                             std::forward<Args>(args)...);
+
+  if (allocation_tx.DidAllocate()) {
+    ABSL_INTERNAL_TRY {
+      inlined_vector_internal::ConstructElements(
+          GetAllocPtr(), allocation_tx.GetData(), &move_values,
+          storage_view.size);
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+      ABSL_INTERNAL_RETHROW;
+    }
+
+    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                             storage_view.size);
+
+    DeallocateIfAllocated();
+    AcquireAllocation(&allocation_tx);
+    SetIsAllocated();
+  }
+
+  AddSize(1);
+  return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+    -> iterator {
+  assert(from != to);
+
+  StorageView storage_view = MakeStorageView();
+
+  size_type erase_size = std::distance(from, to);
+  size_type erase_index =
+      std::distance(const_iterator(storage_view.data), from);
+  size_type erase_end_index = erase_index + erase_size;
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data + erase_end_index));
+
+  inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+                                          &move_values,
+                                          storage_view.size - erase_end_index);
+
+  inlined_vector_internal::DestroyElements(
+      GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+      erase_size);
+
+  SubtractSize(erase_size);
+  return iterator(storage_view.data + erase_index);
+}
+
 template <typename T, size_t N, typename A>
 auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   StorageView storage_view = MakeStorageView();
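EmplaceBack() gets its exception safety from two pieces: the new element is constructed before any existing elements are moved, and the fresh buffer is held by an AllocationTransaction, which releases the memory on scope exit unless the storage commits it via AcquireAllocation(). A simplified, illustrative sketch of that RAII idiom (not Abseil's actual class):

    #include <memory>   // std::allocator_traits
    #include <utility>  // std::exchange

    // Illustrative only: the new buffer is owned by the guard and is freed
    // automatically if an element constructor throws before the container
    // commits the allocation by calling Release().
    template <typename Alloc>
    class AllocationGuard {
      using Traits = std::allocator_traits<Alloc>;

     public:
      AllocationGuard(Alloc* alloc, typename Traits::size_type n)
          : alloc_(alloc), n_(n), data_(Traits::allocate(*alloc, n)) {}

      ~AllocationGuard() {
        if (data_ != nullptr) Traits::deallocate(*alloc_, data_, n_);
      }

      typename Traits::pointer get() const { return data_; }

      // Commits the allocation: ownership passes to the caller.
      typename Traits::pointer Release() { return std::exchange(data_, nullptr); }

     private:
      Alloc* alloc_;
      typename Traits::size_type n_;
      typename Traits::pointer data_;
    };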
@@ -573,8 +632,7 @@ auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));

-  pointer new_data = allocation_tx.Allocate(
-      LegacyNextCapacityFrom(storage_view.capacity, requested_capacity));
+  pointer new_data = allocation_tx.Allocate(requested_capacity);

   inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
                                              &move_values, storage_view.size);
@@ -592,8 +650,8 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
   // May only be called on allocated instances!
   assert(GetIsAllocated());

-  StorageView storage_view = {GetAllocatedData(), GetSize(),
-                              GetAllocatedCapacity()};
+  StorageView storage_view{GetAllocatedData(), GetSize(),
+                           GetAllocatedCapacity()};

   AllocationTransaction allocation_tx(GetAllocPtr());
@@ -634,6 +692,82 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
   }
 }

+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+  using std::swap;
+  assert(this != other_storage_ptr);
+
+  if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+    // Both are allocated, thus we can swap the allocations at the top level.
+
+    swap(data_.allocated, other_storage_ptr->data_.allocated);
+  } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+    // Both are inlined, thus element-wise swap up to smaller size, then move
+    // the remaining elements.
+
+    Storage* small_ptr = this;
+    Storage* large_ptr = other_storage_ptr;
+    if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+    for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+      swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+    }
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+    inlined_vector_internal::ConstructElements(
+        large_ptr->GetAllocPtr(),
+        small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+        large_ptr->GetSize() - small_ptr->GetSize());
+
+    inlined_vector_internal::DestroyElements(
+        large_ptr->GetAllocPtr(),
+        large_ptr->GetInlinedData() + small_ptr->GetSize(),
+        large_ptr->GetSize() - small_ptr->GetSize());
+  } else {
+    // One is allocated and the other is inlined, thus we first move the
+    // elements from the inlined instance to the inlined space in the allocated
+    // instance and then we can finish by having the other vector take on the
+    // allocation.
+
+    Storage* allocated_ptr = this;
+    Storage* inlined_ptr = other_storage_ptr;
+    if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+    StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+                                       allocated_ptr->GetSize(),
+                                       allocated_ptr->GetAllocatedCapacity()};
+
+    IteratorValueAdapter<MoveIterator> move_values(
+        MoveIterator(inlined_ptr->GetInlinedData()));
+
+    ABSL_INTERNAL_TRY {
+      inlined_vector_internal::ConstructElements(
+          inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+          &move_values, inlined_ptr->GetSize());
+    }
+    ABSL_INTERNAL_CATCH_ANY {
+      // Writing to inlined data will trample on the existing state, thus it
+      // needs to be restored when a construction fails.
+      allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+                                      allocated_storage_view.capacity);
+      ABSL_INTERNAL_RETHROW;
+    }
+
+    inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+                                             inlined_ptr->GetInlinedData(),
+                                             inlined_ptr->GetSize());
+
+    inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+                                  allocated_storage_view.capacity);
+  }
+
+  // All cases swap the size, `is_allocated` boolean and the allocator.
+  swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+  swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
 }  // namespace inlined_vector_internal
 }  // namespace absl
absl/flags/BUILD.bazel:

@@ -158,9 +158,11 @@ cc_library(
     name = "usage",
     srcs = [
         "internal/usage.cc",
+        "usage.cc",
     ],
     hdrs = [
         "internal/usage.h",
+        "usage.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
absl/flags/CMakeLists.txt:

@@ -144,8 +144,10 @@ absl_cc_library(
     flags_usage
   SRCS
     "internal/usage.cc"
+    "usage.cc"
   HDRS
     "internal/usage.h"
+    "usage.h"
   COPTS
     ${ABSL_DEFAULT_COPTS}
   LINKOPTS
absl/flags/internal/usage.cc:

@@ -21,6 +21,7 @@
 #include "absl/flags/flag.h"
 #include "absl/flags/internal/path_util.h"
 #include "absl/flags/internal/program_name.h"
+#include "absl/flags/usage.h"
 #include "absl/flags/usage_config.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/str_cat.h"
@@ -204,7 +205,7 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
                    HelpFormat format = HelpFormat::kHumanReadable) {
   if (format == HelpFormat::kHumanReadable) {
     out << flags_internal::ShortProgramInvocationName() << ": "
-        << flags_internal::ProgramUsageMessage() << "\n\n";
+        << absl::ProgramUsageMessage() << "\n\n";
   } else {
     // XML schema is not a part of our public API for now.
     out << "<?xml version=\"1.0\"?>\n"
@@ -213,7 +214,7 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
         // The program name and usage.
         << XMLElement("program", flags_internal::ShortProgramInvocationName())
         << '\n'
-        << XMLElement("usage", flags_internal::ProgramUsageMessage()) << '\n';
+        << XMLElement("usage", absl::ProgramUsageMessage()) << '\n';
   }

   // Map of package name to
@@ -278,38 +279,8 @@ void FlagsHelpImpl(std::ostream& out, flags_internal::FlagKindFilter filter_cb,
   }
 }

-ABSL_CONST_INIT absl::Mutex usage_message_guard(absl::kConstInit);
-ABSL_CONST_INIT std::string* program_usage_message
-    GUARDED_BY(usage_message_guard) = nullptr;
-
 }  // namespace

-// --------------------------------------------------------------------
-// Sets the "usage" message to be used by help reporting routines.
-
-void SetProgramUsageMessage(absl::string_view new_usage_message) {
-  absl::MutexLock l(&usage_message_guard);
-
-  if (flags_internal::program_usage_message != nullptr) {
-    ABSL_INTERNAL_LOG(FATAL, "SetProgramUsageMessage() called twice.");
-    std::exit(1);
-  }
-
-  program_usage_message = new std::string(new_usage_message);
-}
-
-// --------------------------------------------------------------------
-// Returns the usage message set by SetProgramUsageMessage().
-// Note: We are able to return string_view here only because calling
-// SetProgramUsageMessage twice is prohibited.
-absl::string_view ProgramUsageMessage() {
-  absl::MutexLock l(&usage_message_guard);
-
-  return program_usage_message != nullptr
-             ? absl::string_view(*program_usage_message)
-             : "Warning: SetProgramUsageMessage() never called";
-}
-
 // --------------------------------------------------------------------
 // Produces the help message describing specific flag.
 void FlagHelp(std::ostream& out, const flags_internal::CommandLineFlag& flag,
absl/flags/internal/usage.h:

@@ -29,20 +29,6 @@
 namespace absl {
 namespace flags_internal {

-// Sets the "usage" message to be used by help reporting routines.
-// For example:
-//  absl::SetProgramUsageMessage(
-//      absl::StrCat("This program does nothing. Sample usage:\n", argv[0],
-//                   " <uselessarg1> <uselessarg2>"));
-// Do not include commandline flags in the usage: we do that for you!
-// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit.
-void SetProgramUsageMessage(absl::string_view new_usage_message);
-
-// Returns the usage message set by SetProgramUsageMessage().
-absl::string_view ProgramUsageMessage();
-
-// --------------------------------------------------------------------
-
 // The format to report the help messages in.
 enum class HelpFormat {
   kHumanReadable,
absl/flags/internal/usage_test.cc:

@@ -13,14 +13,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include "absl/flags/internal/usage.h"
+
 #include <sstream>

 #include "gtest/gtest.h"
 #include "absl/flags/flag.h"
-#include "absl/flags/parse.h"
 #include "absl/flags/internal/path_util.h"
 #include "absl/flags/internal/program_name.h"
-#include "absl/flags/internal/usage.h"
+#include "absl/flags/parse.h"
+#include "absl/flags/usage.h"
 #include "absl/flags/usage_config.h"
 #include "absl/memory/memory.h"
 #include "absl/strings/match.h"
@@ -81,11 +83,11 @@ class UsageReportingTest : public testing::Test {
 using UsageReportingDeathTest = UsageReportingTest;

 TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) {
-  EXPECT_EQ(flags::ProgramUsageMessage(), "Custom usage message");
+  EXPECT_EQ(absl::ProgramUsageMessage(), "Custom usage message");

 #ifndef _WIN32
   // TODO(rogeeff): figure out why this does not work on Windows.
-  EXPECT_DEATH(flags::SetProgramUsageMessage("custom usage message"),
+  EXPECT_DEATH(absl::SetProgramUsageMessage("custom usage message"),
                ".*SetProgramUsageMessage\\(\\) called twice.*");
 #endif
 }
@@ -360,7 +362,7 @@ TEST_F(UsageReportingTest, TestUsageFlag_helpon) {
 int main(int argc, char* argv[]) {
   absl::GetFlag(FLAGS_undefok);  // Force linking of parse.cc
   flags::SetProgramInvocationName("usage_test");
-  flags::SetProgramUsageMessage("Custom usage message");
+  absl::SetProgramUsageMessage("Custom usage message");
   ::testing::InitGoogleTest(&argc, argv);

   return RUN_ALL_TESTS();
absl/flags/usage.cc (new file, 56 lines):

@@ -0,0 +1,56 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "absl/flags/usage.h"
+
+#include <string>
+
+#include "absl/flags/internal/usage.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace flags_internal {
+namespace {
+ABSL_CONST_INIT absl::Mutex usage_message_guard(absl::kConstInit);
+ABSL_CONST_INIT std::string* program_usage_message
+    GUARDED_BY(usage_message_guard) = nullptr;
+}  // namespace
+}  // namespace flags_internal
+
+// --------------------------------------------------------------------
+// Sets the "usage" message to be used by help reporting routines.
+void SetProgramUsageMessage(absl::string_view new_usage_message) {
+  absl::MutexLock l(&flags_internal::usage_message_guard);
+
+  if (flags_internal::program_usage_message != nullptr) {
+    ABSL_INTERNAL_LOG(FATAL, "SetProgramUsageMessage() called twice.");
+    std::exit(1);
+  }
+
+  flags_internal::program_usage_message = new std::string(new_usage_message);
+}
+
+// --------------------------------------------------------------------
+// Returns the usage message set by SetProgramUsageMessage().
+// Note: We are able to return string_view here only because calling
+// SetProgramUsageMessage twice is prohibited.
+absl::string_view ProgramUsageMessage() {
+  absl::MutexLock l(&flags_internal::usage_message_guard);
+
+  return flags_internal::program_usage_message != nullptr
+             ? absl::string_view(*flags_internal::program_usage_message)
+             : "Warning: SetProgramUsageMessage() never called";
+}
+
+}  // namespace absl

absl/flags/usage.h (new file, 40 lines):

@@ -0,0 +1,40 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_USAGE_H_
+#define ABSL_FLAGS_USAGE_H_
+
+#include "absl/strings/string_view.h"
+
+// --------------------------------------------------------------------
+// Usage reporting interfaces
+
+namespace absl {
+
+// Sets the "usage" message to be used by help reporting routines.
+// For example:
+//  absl::SetProgramUsageMessage(
+//      absl::StrCat("This program does nothing. Sample usage:\n", argv[0],
+//                   " <uselessarg1> <uselessarg2>"));
+// Do not include commandline flags in the usage: we do that for you!
+// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit.
+void SetProgramUsageMessage(absl::string_view new_usage_message);
+
+// Returns the usage message set by SetProgramUsageMessage().
+absl::string_view ProgramUsageMessage();
+
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_USAGE_H_
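Taken together, the new public surface is a two-function API. A typical call site, adapted from the example in the header comment above (the surrounding main() scaffolding is illustrative):

    #include "absl/flags/usage.h"
    #include "absl/strings/str_cat.h"

    int main(int argc, char* argv[]) {
      absl::SetProgramUsageMessage(
          absl::StrCat("This program does nothing. Sample usage:\n", argv[0],
                       " <uselessarg1> <uselessarg2>"));

      // Help-reporting code can later read the message back:
      absl::string_view usage = absl::ProgramUsageMessage();
      (void)usage;
      return 0;
    }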
absl/random/internal/nanobenchmark.cc:

@@ -59,6 +59,24 @@
 #include <time.h>  // NOLINT
 #endif

+// ABSL_HAVE_ATTRIBUTE
+#if !defined(ABSL_HAVE_ATTRIBUTE)
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+#endif
+
+// ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
+#else
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE
+#endif
+
 namespace absl {
 namespace random_internal_nanobenchmark {
 namespace {
@@ -658,8 +676,8 @@ Ticks TotalDuration(const Func func, const void* arg, const InputVec* inputs,
 }

 // (Nearly) empty Func for measuring timer overhead/resolution.
-ABSL_ATTRIBUTE_NEVER_INLINE FuncOutput EmptyFunc(const void* arg,
-                                                 const FuncInput input) {
+ABSL_RANDOM_INTERNAL_ATTRIBUTE_NEVER_INLINE FuncOutput
+EmptyFunc(const void* arg, const FuncInput input) {
   return input;
 }
absl/random/internal/platform.h:

@@ -81,50 +81,8 @@
 // Attribute Checks
 // -----------------------------------------------------------------------------

-// ABSL_HAVE_ATTRIBUTE
-#undef ABSL_HAVE_ATTRIBUTE
-#ifdef __has_attribute
-#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
-#else
-#define ABSL_HAVE_ATTRIBUTE(x) 0
-#endif
-
-// ABSL_ATTRIBUTE_ALWAYS_INLINE forces inlining of the method.
-#undef ABSL_ATTRIBUTE_ALWAYS_INLINE
-#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
-    (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
-#elif defined(_MSC_VER)
-// We can achieve something similar to attribute((always_inline)) with MSVC by
-// using the __forceinline keyword, however this is not perfect. MSVC is
-// much less aggressive about inlining, and even with the __forceinline keyword.
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE __forceinline
-#else
-#define ABSL_ATTRIBUTE_ALWAYS_INLINE
-#endif
-
-// ABSL_ATTRIBUTE_NEVER_INLINE prevents inlining of the method.
-#undef ABSL_ATTRIBUTE_NEVER_INLINE
-#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
-#elif defined(_MSC_VER)
-#define ABSL_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
-#else
-#define ABSL_ATTRIBUTE_NEVER_INLINE
-#endif
-
-// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
-// the indicated function.
-#undef ABSL_ATTRIBUTE_FLATTEN
-#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
-#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
-#else
-#define ABSL_ATTRIBUTE_FLATTEN
-#endif
-
 // ABSL_RANDOM_INTERNAL_RESTRICT annotates whether pointers may be considered
 // to be unaliased.
-#undef ABSL_RANDOM_INTERNAL_RESTRICT
 #if defined(__clang__) || defined(__GNUC__)
 #define ABSL_RANDOM_INTERNAL_RESTRICT __restrict__
 #elif defined(_MSC_VER)
@ -24,6 +24,37 @@
|
||||||
|
|
||||||
#include "absl/random/internal/platform.h"
|
#include "absl/random/internal/platform.h"
|
||||||
|
|
||||||
|
// ABSL_HAVE_ATTRIBUTE
|
||||||
|
#if !defined(ABSL_HAVE_ATTRIBUTE)
|
||||||
|
#ifdef __has_attribute
|
||||||
|
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
|
||||||
|
#else
|
||||||
|
#define ABSL_HAVE_ATTRIBUTE(x) 0
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
|
||||||
|
(defined(__GNUC__) && !defined(__clang__))
|
||||||
|
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
|
||||||
|
__attribute__((always_inline))
|
||||||
|
#elif defined(_MSC_VER)
|
||||||
|
// We can achieve something similar to attribute((always_inline)) with MSVC by
|
||||||
|
// using the __forceinline keyword, however this is not perfect. MSVC is
|
||||||
|
// much less aggressive about inlining, and even with the __forceinline keyword.
|
||||||
|
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
|
||||||
|
#else
|
||||||
|
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// ABSL_ATTRIBUTE_FLATTEN enables much more aggressive inlining within
|
||||||
|
// the indicated function.
|
||||||
|
#undef ABSL_ATTRIBUTE_FLATTEN
|
||||||
|
#if ABSL_HAVE_ATTRIBUTE(flatten) || (defined(__GNUC__) && !defined(__clang__))
|
||||||
|
#define ABSL_ATTRIBUTE_FLATTEN __attribute__((flatten))
|
||||||
|
#else
|
||||||
|
#define ABSL_ATTRIBUTE_FLATTEN
|
||||||
|
#endif
|
||||||
|
|
||||||
// ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
|
// ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
|
||||||
// a hardware accelerated implementation of randen, or whether it
|
// a hardware accelerated implementation of randen, or whether it
|
||||||
// will contain stubs that exit the process.
|
// will contain stubs that exit the process.
|
||||||
|
@@ -160,7 +191,7 @@ using Vector128 = __vector unsigned long long;  // NOLINT(runtime/int)
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 ReverseBytes(const Vector128& v) {
   // Reverses the bytes of the vector.
   const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -171,26 +202,26 @@ ReverseBytes(const Vector128& v) {
 // WARNING: these load/store in native byte order. It is OK to load and then
 // store an unchanged vector, but interpreting the bits as a number or input
 // to AES will have undefined results.
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vec_vsx_ld(0, reinterpret_cast<const Vector128*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vec_vsx_st(v, 0, reinterpret_cast<Vector128*>(to));
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   return Vector128(__builtin_crypto_vcipher(state, round_key));
 }
 
 // Enables native loads in the round loop by pre-swapping.
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   using absl::random_internal::RandenTraits;
   constexpr size_t kLanes = 2;
   constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
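Read together, the load/round/store helpers in this hunk compose into one round step. A hedged sketch (the OneRoundInPlace wrapper is ours, not the patch's; it assumes the surrounding POWER definitions of Vector128Load, AesRound, and Vector128Store are in scope):

// Hypothetical: apply one full AES round to a 16-byte block in memory.
static void OneRoundInPlace(void* block, const Vector128& round_key) {
  Vector128 state = Vector128Load(block);  // native byte order
  state = AesRound(state, round_key);      // full round incl. MixColumns
  Vector128Store(state, block);            // stored back in the same order
}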
@@ -242,19 +273,19 @@ using Vector128 = uint8x16_t;
 
 namespace {
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return vld1q_u8(reinterpret_cast<const uint8_t*>(from));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   vst1q_u8(reinterpret_cast<uint8_t*>(to), v);
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -266,8 +297,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
   return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
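The zero-key trick in that return statement relies on a standard equivalence between the NEON and x86 AES primitives. A hedged sketch (the FullAesRound helper is hypothetical; assumes AArch64 with the crypto extensions enabled):

#include <arm_neon.h>

// vaeseq_u8(state, k) computes ShiftRows(SubBytes(state ^ k)); passing a zero
// key makes the leading XOR a no-op, and vaesmcq_u8 adds MixColumns, so
// XOR-ing round_key afterwards matches x86's _mm_aesenc_si128(state, key).
uint8x16_t FullAesRound(uint8x16_t state, uint8x16_t round_key) {
  return veorq_u8(vaesmcq_u8(vaeseq_u8(state, vdupq_n_u8(0))), round_key);
}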
@@ -282,13 +313,15 @@ namespace {
 class Vector128 {
  public:
   // Convert from/to intrinsics.
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE explicit Vector128(
       const __m128i& Vector128)
       : data_(Vector128) {}
 
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const { return data_; }
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __m128i data() const {
+    return data_;
+  }
 
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
       const Vector128& other) {
     data_ = _mm_xor_si128(data_, other.data());
     return *this;
@@ -298,20 +331,20 @@ class Vector128 {
   __m128i data_;
 };
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return Vector128(_mm_load_si128(reinterpret_cast<const __m128i*>(from)));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
-    const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+Vector128Store(const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   _mm_store_si128(reinterpret_cast<__m128i * ABSL_RANDOM_INTERNAL_RESTRICT>(to),
                   v.data());
 }
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // It is important to always use the full round function - omitting the
   // final MixColumns reduces security [https://eprint.iacr.org/2010/041.pdf]
@@ -319,8 +352,8 @@ AesRound(const Vector128& state, const Vector128& round_key) {
   return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
 }
 
-inline ABSL_TARGET_CRYPTO ABSL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
+inline ABSL_TARGET_CRYPTO ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void
+SwapEndian(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT) {}
 
 }  // namespace
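On x86 the whole round is a single AES-NI instruction. A hedged usage sketch (the OneRound wrapper is hypothetical; assumes AES-NI is available, e.g. compiled with -maes, and that block is 16-byte aligned as _mm_load_si128 requires):

#include <wmmintrin.h>  // _mm_aesenc_si128; pulls in the SSE2 load/store too

// Hypothetical: one full AES round over an aligned 16-byte block in memory.
// _mm_aesenc_si128 = MixColumns(ShiftRows(SubBytes(state))) ^ round_key.
void OneRound(void* block, __m128i round_key) {
  __m128i state = _mm_load_si128(static_cast<__m128i*>(block));
  state = _mm_aesenc_si128(state, round_key);
  _mm_store_si128(static_cast<__m128i*>(block), state);
}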
@@ -417,8 +450,8 @@ constexpr size_t kLanes = 2;
 
 // Block shuffles applies a shuffle to the entire state between AES rounds.
 // Improved odd-even shuffle from "New criterion for diffusion property".
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
+BlockShuffle(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   constexpr size_t shuffle[kFeistelBlocks] = {7, 2, 13, 4, 11, 8, 3, 6,
@@ -466,9 +499,10 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void BlockShuffle(
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const u64x2*
-FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
-             const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO const
+    u64x2*
+    FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
+                 const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   // MSVC does a horrible job at unrolling loops.
@@ -527,9 +561,9 @@ FeistelRound(uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
 // Indistinguishable from ideal by chosen-ciphertext adversaries using less than
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void Permute(
-    const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
-    uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE ABSL_TARGET_CRYPTO void
+Permute(const void* ABSL_RANDOM_INTERNAL_RESTRICT keys,
+        uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
       static_cast<const u64x2*>(keys);
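The surrounding comments describe a 16-branch type-2 generalized Feistel: each round applies the AES-based round function to the even blocks and XORs the result into their odd partners, and BlockShuffle then permutes all 16 blocks before the next round. A scalar schematic of that structure (entirely hypothetical; a toy round function stands in for AES):

#include <cstdint>

// Toy stand-in for the per-branch round function (NOT AES).
static uint64_t ToyF(uint64_t x) { return x * 0x9E3779B97F4A7C15u + 1; }

// One round of a type-2 generalized Feistel over 16 blocks: each odd block
// receives f(even neighbor); a block permutation would follow each round.
static void FeistelRoundSketch(uint64_t block[16]) {
  for (int branch = 0; branch < 16; branch += 2) {
    block[branch + 1] ^= ToyF(block[branch]);
  }
}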
@@ -20,6 +20,28 @@
 
 #include "absl/random/internal/platform.h"
 
+// ABSL_HAVE_ATTRIBUTE
+#if !defined(ABSL_HAVE_ATTRIBUTE)
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
+  __attribute__((always_inline))
+#elif defined(_MSC_VER)
+// We can achieve something similar to attribute((always_inline)) with MSVC by
+// using the __forceinline keyword, however this is not perfect. MSVC is
+// much less aggressive about inlining, and even with the __forceinline keyword.
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
+#else
+#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
 namespace {
 
 // AES portions based on rijndael-alg-fst.c,
@@ -222,7 +244,7 @@ struct alignas(16) u64x2 {
 // as an underlying vector register.
 //
 struct Vector128 {
-  inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
+  inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128& operator^=(
       const Vector128& other) {
     s[0] ^= other.s[0];
     s[1] ^= other.s[1];
@@ -234,7 +256,7 @@ struct Vector128 {
   uint32_t s[4];
 };
 
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   Vector128 result;
   const uint8_t* ABSL_RANDOM_INTERNAL_RESTRICT src =
@@ -259,7 +281,7 @@ Vector128Load(const void* ABSL_RANDOM_INTERNAL_RESTRICT from) {
   return result;
 }
 
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
     const Vector128& v, void* ABSL_RANDOM_INTERNAL_RESTRICT to) {
   uint8_t* dst = reinterpret_cast<uint8_t*>(to);
   dst[0] = static_cast<uint8_t>(v.s[0] >> 24);
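The portable store writes each 32-bit lane most-significant byte first, i.e. big-endian, regardless of host byte order. The pattern in isolation (the StoreBigEndian32 helper is ours, not the patch's):

#include <cstdint>

// Hypothetical: serialize one 32-bit lane big-endian, as dst[0] does above.
inline void StoreBigEndian32(uint32_t lane, uint8_t* dst) {
  dst[0] = static_cast<uint8_t>(lane >> 24);  // most significant byte first
  dst[1] = static_cast<uint8_t>(lane >> 16);
  dst[2] = static_cast<uint8_t>(lane >> 8);
  dst[3] = static_cast<uint8_t>(lane);
}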
@@ -282,7 +304,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
 
 // One round of AES. "round_key" is a public constant for breaking the
 // symmetry of AES (ensures previously equal columns differ afterwards).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE Vector128
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
 AesRound(const Vector128& state, const Vector128& round_key) {
   // clang-format off
   Vector128 result;
@@ -348,7 +370,7 @@ static_assert(kKeys == kRoundKeys, "kKeys and kRoundKeys must be equal");
 static constexpr size_t kLanes = 2;
 
 // The improved Feistel block shuffle function for 16 blocks.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
     uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state_u64) {
   static_assert(kFeistelBlocks == 16,
                 "Feistel block shuffle only works for 16 blocks.");
@@ -409,7 +431,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
     uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state,
     const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
   for (size_t branch = 0; branch < kFeistelBlocks; branch += 4) {
@@ -435,7 +457,7 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE const u64x2* FeistelRound(
 // Indistinguishable from ideal by chosen-ciphertext adversaries using less than
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE void Permute(
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
     const void* keys, uint64_t* ABSL_RANDOM_INTERNAL_RESTRICT state) {
   const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
       static_cast<const u64x2*>(keys);
@@ -62,8 +62,7 @@ inline bool EndsWith(absl::string_view text, absl::string_view suffix) {
   return suffix.empty() ||
          (text.size() >= suffix.size() &&
           memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
-                 suffix.size()) == 0
-      );
+                 suffix.size()) == 0);
 }
 
 // EqualsIgnoreCase()
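This last hunk is purely a formatting change; behavior is unchanged. A usage sketch, assuming this is the EndsWith exposed as absl::EndsWith in absl/strings/match.h:

#include <cassert>
#include "absl/strings/match.h"

int main() {
  assert(absl::EndsWith("randen_hwaes.cc", ".cc"));  // suffix present
  assert(absl::EndsWith("anything", ""));            // empty suffix: always true
  assert(!absl::EndsWith("ab", "abc"));              // suffix longer than text
  return 0;
}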