Export of internal Abseil changes.

-- 7a6ff16a85beb730c172d5d25cf1b5e1be885c56 by Laramie Leavitt <lar@google.com>:
Internal change.
PiperOrigin-RevId: 254454546

-- ff8f9bafaefc26d451f576ea4a06d150aed63f6f by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254451562

-- deefc5b651b479ce36f0b4ef203e119c0c8936f2 by CJ Johnson <johnsoncj@google.com>:
Account for subtracting unsigned values from the size of InlinedVector
PiperOrigin-RevId: 254450625

-- 3c677316a27bcadc17e41957c809ca472d5fef14 by Andy Soffer <asoffer@google.com>:
Add C++17's std::make_from_tuple to absl/utility/utility.h
PiperOrigin-RevId: 254411573

-- 4ee3536a918830eeec402a28fc31a62c7c90b940 by CJ Johnson <johnsoncj@google.com>:
Adds benchmark for the rest of the InlinedVector public API
PiperOrigin-RevId: 254408378

-- e5a21a00700ee83498ff1efbf649169756463ee4 by CJ Johnson <johnsoncj@google.com>:
Updates the definition of InlinedVector::shrink_to_fit() to be exception safe
and adds exception safety tests for it.
PiperOrigin-RevId: 254401387

-- 2ea82e72b86d82d78b4e4712a63a55981b53c64b by Laramie Leavitt <lar@google.com>:
Use absl::InsecureBitGen in place of std::mt19937 in tests
absl/random/...distribution_test.cc
PiperOrigin-RevId: 254289444

-- fa099e02c413a7ffda732415e8105cad26a90337 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254286334

-- ce34b7f36933b30cfa35b9c9a5697a792b5666e4 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254273059

-- 6f9c473da7c2090c2e85a37c5f00622e8a912a89 by Jorg Brown <jorg@google.com>:
Change absl::container_internal::CompressedTuple to instantiate its internal
Storage class with the name of the type it's holding, rather than the name of
the Tuple. This is not an externally-visible change, other than less compiler
memory is used and less debug information is generated.
PiperOrigin-RevId: 254269285

-- 8bd3c186bf2fc0c55d8a2dd6f28a5327502c9fba by Andy Soffer <asoffer@google.com>:
Adding short-hand IntervalClosed for IntervalClosedClosed and IntervalOpen for
IntervalOpenOpen.
PiperOrigin-RevId: 254252419

-- ea957f99b6a04fccd42aa05605605f3b44b1ecfd by Abseil Team <absl-team@google.com>:
Do not directly use __SIZEOF_INT128__. In order to avoid linker errors when
building with clang-cl (__fixunsdfti, __udivti3 and __fixunssfti are
undefined), this CL uses ABSL_HAVE_INTRINSIC_INT128, which is not defined for
clang-cl.
PiperOrigin-RevId: 254250739

-- 89ab385cd26b34d64130bce856253aaba96d2345 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254242321

-- cffc793d93eca6d6bdf7de733847b6ab4a255ae9 by CJ Johnson <johnsoncj@google.com>:
Adds benchmark for InlinedVector::reserve(size_type)
PiperOrigin-RevId: 254199226

-- c90c7a9fa3c8f0c9d5114036979548b055ea2f2a by Gennadiy Rozental <rogeeff@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 254072387

-- c4c388beae016c9570ab54ffa1d52660e4a85b7b by Laramie Leavitt <lar@google.com>:
Internal cleanup.
PiperOrigin-RevId: 254062381

-- d3c992e221cc74e5372d0c8fa410170b6a43c062 by Tom Manshreck <shreck@google.com>:
Update distributions.h to Abseil standards
PiperOrigin-RevId: 254054946

-- d15ad0035c34ef11b14fadc5a4a2d3ec415f5518 by CJ Johnson <johnsoncj@google.com>:
Removes functions with only one caller from the implementation details of
InlinedVector by manually inlining the definitions
PiperOrigin-RevId: 254005427

-- 2f37e807efc3a8ef1f4b539bdd379917d4151520 by Andy Soffer <asoffer@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253999861

-- 24ed1694b6430791d781ed533a8f8ccf6cac5856 by CJ Johnson <johnsoncj@google.com>:
Updates the definition of InlinedVector::assign(...)/InlinedVector::operator=(...)
to new, exception-safe implementations with exception safety tests to boot
PiperOrigin-RevId: 253993691

-- 5613d95f5a7e34a535cfaeadce801441e990843e by CJ Johnson <johnsoncj@google.com>:
Adds benchmarks for InlinedVector::shrink_to_fit()
PiperOrigin-RevId: 253989647

-- 2a96ddfdac40bbb8cb6a7f1aeab90917067c6e63 by Abseil Team <absl-team@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253927497

-- bf1aff8fc9ffa921ad74643e9525ecf25b0d8dc1 by Andy Soffer <asoffer@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253920512

-- bfc03f4a3dcda3cf3a4b84bdb84cda24e3394f41 by Laramie Leavitt <lar@google.com>:
Internal change.
PiperOrigin-RevId: 253886486

-- 05036cfcc078ca7c5f581a00dfb0daed568cbb69 by Eric Fiselier <ericwf@google.com>:
Don't include `winsock2.h` because it drags in `windows.h` and friends, and
they define awful macros like OPAQUE, ERROR, and more. This has the potential
to break Abseil users. Instead we only forward declare `timeval` and require
Windows users to include `winsock2.h` themselves. This is both inconsistent
and poor QoI, but including `windows.h` is bad too.
PiperOrigin-RevId: 253852615

GitOrigin-RevId: 7a6ff16a85beb730c172d5d25cf1b5e1be885c56
Change-Id: Icd6aff87da26f29ec8915da856f051129987cef6
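Note: of the entries above, the make_from_tuple addition is the most user-visible one. A minimal sketch of what it enables, assuming absl::make_from_tuple mirrors C++17's std::make_from_tuple as the entry states:

#include <cstddef>
#include <string>
#include <tuple>

#include "absl/utility/utility.h"

int main() {
  // Unpacks the tuple into std::string's (count, char) constructor,
  // yielding "aaaa".
  std::tuple<std::size_t, char> args(4, 'a');
  std::string s = absl::make_from_tuple<std::string>(args);
  return s == "aaaa" ? 0 : 1;
}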
parent 43ef2148c0 · commit e9324d926a
120 changed files with 22842 additions and 340 deletions
absl/container/BUILD.bazel

@@ -127,6 +127,7 @@ cc_library(
+        ":compressed_tuple",
         "//absl/base:core_headers",
         "//absl/memory",
         "//absl/meta:type_traits",
         "//absl/types:span",
     ],
 )
absl/container/CMakeLists.txt

@@ -126,6 +126,7 @@ absl_cc_library(
+    absl::compressed_tuple
     absl::core_headers
     absl::memory
     absl::span
     absl::type_traits
   PUBLIC
 )
absl/container/inlined_vector.h

@@ -166,7 +166,7 @@ class InlinedVector {
   InlinedVector(const InlinedVector& other, const allocator_type& alloc)
       : storage_(alloc) {
     if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
     } else {
       storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
                           other.size());
@@ -193,7 +193,7 @@ class InlinedVector {
       std::is_nothrow_move_constructible<value_type>::value)
       : storage_(*other.storage_.GetAllocPtr()) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if (other.storage_.GetIsAllocated()) {
       storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
@@ -227,7 +227,7 @@ class InlinedVector {
       absl::allocator_is_nothrow<allocator_type>::value)
       : storage_(alloc) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
                other.storage_.GetIsAllocated()) {
@@ -464,26 +464,22 @@ class InlinedVector {
   InlinedVector& operator=(InlinedVector&& other) {
     if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return *this;

-    if (other.storage_.GetIsAllocated()) {
-      clear();
-      storage_.SetAllocatedSize(other.size());
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+    if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
+      inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+                                               size());
+      if (storage_.GetIsAllocated()) {
+        AllocatorTraits::deallocate(*storage_.GetAllocPtr(),
+                                    storage_.GetAllocatedData(),
+                                    storage_.GetAllocatedCapacity());
+      }
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else {
-      if (storage_.GetIsAllocated()) clear();
-      // Both are inlined now.
-      if (size() < other.size()) {
-        auto mid = std::make_move_iterator(other.begin() + size());
-        std::copy(std::make_move_iterator(other.begin()), mid, begin());
-        UninitializedCopy(mid, std::make_move_iterator(other.end()), end());
-      } else {
-        auto new_end = std::copy(std::make_move_iterator(other.begin()),
-                                 std::make_move_iterator(other.end()), begin());
-        Destroy(new_end, end());
-      }
-      storage_.SetInlinedSize(other.size());
+      storage_.Assign(IteratorValueAdapter<MoveIterator>(
+                          MoveIterator(other.storage_.GetInlinedData())),
+                      other.size());
     }

     return *this;
   }
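Note on the hunk above: the rewritten move assignment destroys and deallocates in place before stealing the representation on the memcpy-able/allocated path, and routes the purely inlined path through Storage::Assign() with move iterators, which is what the "exception-safe implementations" entry in the commit message refers to. Caller-visible usage is unchanged; a small illustration using only public API:

#include <utility>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> a = {1, 2, 3};
  absl::InlinedVector<int, 4> b = {9};
  b = std::move(a);  // steals a's heap allocation if it has one; otherwise
                     // move-assigns the inlined elements through the storage.
  return b.size() == 3 ? 0 : 1;
}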
@@ -491,23 +487,7 @@ class InlinedVector {
   //
   // Replaces the contents of the inlined vector with `n` copies of `v`.
   void assign(size_type n, const_reference v) {
-    if (n <= size()) {  // Possibly shrink
-      std::fill_n(begin(), n, v);
-      erase(begin() + n, end());
-      return;
-    }
-    // Grow
-    reserve(n);
-    std::fill_n(begin(), size(), v);
-    if (storage_.GetIsAllocated()) {
-      UninitializedFill(storage_.GetAllocatedData() + size(),
-                        storage_.GetAllocatedData() + n, v);
-      storage_.SetAllocatedSize(n);
-    } else {
-      UninitializedFill(storage_.GetInlinedData() + size(),
-                        storage_.GetInlinedData() + n, v);
-      storage_.SetInlinedSize(n);
-    }
+    storage_.Assign(CopyValueAdapter(v), n);
   }

   // Overload of `InlinedVector::assign()` to replace the contents of the
@@ -522,24 +502,8 @@ class InlinedVector {
   template <typename ForwardIterator,
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   void assign(ForwardIterator first, ForwardIterator last) {
-    auto length = std::distance(first, last);
-
-    // Prefer reassignment to copy construction for elements.
-    if (static_cast<size_type>(length) <= size()) {
-      erase(std::copy(first, last, begin()), end());
-      return;
-    }
-
-    reserve(length);
-    iterator out = begin();
-    for (; out != end(); ++first, ++out) *out = *first;
-    if (storage_.GetIsAllocated()) {
-      UninitializedCopy(first, last, out);
-      storage_.SetAllocatedSize(length);
-    } else {
-      UninitializedCopy(first, last, out);
-      storage_.SetInlinedSize(length);
-    }
+    storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+                    std::distance(first, last));
   }

   // Overload of `InlinedVector::assign()` to replace the contents of the
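Note: both assign() overloads now collapse into a single Storage::Assign() entry point parameterized by a value adapter (CopyValueAdapter for n copies, IteratorValueAdapter for ranges), so the shrink/grow bookkeeping lives in one place. The caller-visible behavior stays the usual one:

#include <string>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<std::string, 2> v = {"a", "b", "c"};
  v.assign(2, "x");  // shrink: assigns over [0,2), destroys the tail
  v.assign(5, "y");  // grow: assigns over live slots, constructs the rest
  return v.size() == 5 ? 0 : 1;
}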
@@ -624,7 +588,15 @@ class InlinedVector {
   // of `v` starting at `pos`. Returns an `iterator` pointing to the first of
   // the newly inserted elements.
   iterator insert(const_iterator pos, size_type n, const_reference v) {
-    return InsertWithCount(pos, n, v);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(n == 0)) {
+      return const_cast<iterator>(pos);
+    }
+    value_type copy = v;
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    std::fill(it_pair.first, it_pair.second, copy);
+    UninitializedFill(it_pair.second, it_pair.first + n, copy);
+    return it_pair.first;
   }

   // Overload of `InlinedVector::insert()` for copying the contents of the
@@ -644,7 +616,17 @@ class InlinedVector {
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   iterator insert(const_iterator pos, ForwardIterator first,
                   ForwardIterator last) {
-    return InsertWithForwardRange(pos, first, last);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(first == last)) {
+      return const_cast<iterator>(pos);
+    }
+    auto n = std::distance(first, last);
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    size_type used_spots = it_pair.second - it_pair.first;
+    auto open_spot = std::next(first, used_spots);
+    std::copy(first, open_spot, it_pair.first);
+    UninitializedCopy(open_spot, last, it_pair.second);
+    return it_pair.first;
   }

   // Overload of `InlinedVector::insert()` for inserting elements constructed
@@ -696,17 +678,26 @@ class InlinedVector {
   reference emplace_back(Args&&... args) {
     size_type s = size();
     if (ABSL_PREDICT_FALSE(s == capacity())) {
-      return GrowAndEmplaceBack(std::forward<Args>(args)...);
-    }
-    pointer space;
-    if (storage_.GetIsAllocated()) {
-      storage_.SetAllocatedSize(s + 1);
-      space = storage_.GetAllocatedData();
+      size_type new_capacity = 2 * capacity();
+      pointer new_data =
+          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+      reference new_element =
+          Construct(new_data + s, std::forward<Args>(args)...);
+      UninitializedCopy(std::make_move_iterator(data()),
+                        std::make_move_iterator(data() + s), new_data);
+      ResetAllocation(new_data, new_capacity, s + 1);
+      return new_element;
     } else {
-      storage_.SetInlinedSize(s + 1);
-      space = storage_.GetInlinedData();
+      pointer space;
+      if (storage_.GetIsAllocated()) {
+        storage_.SetAllocatedSize(s + 1);
+        space = storage_.GetAllocatedData();
+      } else {
+        storage_.SetInlinedSize(s + 1);
+        space = storage_.GetInlinedData();
+      }
+      return Construct(space + s, std::forward<Args>(args)...);
     }
-    return Construct(space + s, std::forward<Args>(args)...);
   }

   // `InlinedVector::push_back()`
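Note: the growth path is now spelled out inline (GrowAndEmplaceBack had only this one caller, per the commit message). When size() == capacity(), a buffer of twice the capacity is allocated and the new element is constructed first, before the old elements are moved over. A quick illustration of the resulting amortized-constant push cost:

#include <cstdio>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v;
  auto last_capacity = v.capacity();
  for (int i = 0; i < 64; ++i) {
    v.emplace_back(i);
    if (v.capacity() != last_capacity) {
      // Capacity doubles each time the buffer spills: 4 -> 8 -> 16 -> 32 -> 64.
      std::printf("size=%u capacity=%u\n", unsigned(v.size()),
                  unsigned(v.capacity()));
      last_capacity = v.capacity();
    }
  }
  return 0;
}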
@@ -727,7 +718,7 @@ class InlinedVector {
   void pop_back() noexcept {
     assert(!empty());
     AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
-    storage_.AddSize(-1);
+    storage_.SubtractSize(1);
   }

   // `InlinedVector::erase()`
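Note: this is the "subtracting unsigned values" fix from the commit message. `size_type` is unsigned, and the metadata packs the size and the is_allocated bit into one word (see `SetAllocatedSize()` in the internal header below, which computes `(size << 1) | 1`), so a signed -1 had to be replaced with an explicit subtraction. A standalone sketch of that encoding, with hypothetical local names:

#include <cassert>
#include <cstdint>

int main() {
  // Low bit: is_allocated flag; remaining bits: size. Mirrors the encoding
  // used by the Storage metadata in this diff.
  std::uint64_t meta = (std::uint64_t{3} << 1) | 1u;  // SetAllocatedSize(3)
  assert(meta >> 1 == 3);                             // GetSize()
  assert((meta & 1) == 1);                            // GetIsAllocated()
  meta -= std::uint64_t{1} << 1;                      // SubtractSize(1)
  assert(meta >> 1 == 2 && (meta & 1) == 1);
  return 0;
}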
@@ -794,10 +785,20 @@ class InlinedVector {
   // effects. Otherwise, `reserve()` will reallocate, performing an n-time
   // element-wise move of everything contained.
   void reserve(size_type n) {
-    if (n > capacity()) {
-      // Make room for new elements
-      EnlargeBy(n - size());
+    if (n <= capacity()) {
+      return;
     }
+    const size_type s = size();
+    size_type target = (std::max)(static_cast<size_type>(N), n);
+    size_type new_capacity = capacity();
+    while (new_capacity < target) {
+      new_capacity <<= 1;
+    }
+    pointer new_data =
+        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+    UninitializedCopy(std::make_move_iterator(data()),
+                      std::make_move_iterator(data() + s), new_data);
+    ResetAllocation(new_data, new_capacity, s);
   }

   // `InlinedVector::shrink_to_fit()`
@@ -812,240 +813,21 @@ class InlinedVector {
   // If `size() > N` and `size() < capacity()` the elements will be moved to a
   // smaller heap allocation.
   void shrink_to_fit() {
-    const auto s = size();
-    if (ABSL_PREDICT_FALSE(!storage_.GetIsAllocated() || s == capacity()))
-      return;
-
-    if (s <= N) {
-      // Move the elements to the inlined storage.
-      // We have to do this using a temporary, because `inlined_storage` and
-      // `allocation_storage` are in a union field.
-      auto temp = std::move(*this);
-      assign(std::make_move_iterator(temp.begin()),
-             std::make_move_iterator(temp.end()));
-      return;
+    if (storage_.GetIsAllocated()) {
+      storage_.ShrinkToFit();
     }
-
-    // Reallocate storage and move elements.
-    // We can't simply use the same approach as above, because `assign()` would
-    // call into `reserve()` internally and reserve larger capacity than we need
-    pointer new_data = AllocatorTraits::allocate(*storage_.GetAllocPtr(), s);
-    UninitializedCopy(std::make_move_iterator(storage_.GetAllocatedData()),
-                      std::make_move_iterator(storage_.GetAllocatedData() + s),
-                      new_data);
-    ResetAllocation(new_data, s, s);
   }

   // `InlinedVector::swap()`
   //
   // Swaps the contents of this inlined vector with the contents of `other`.
   void swap(InlinedVector& other) {
-    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return;
-
-    SwapImpl(other);
-  }
-
- private:
-  template <typename H, typename TheT, size_t TheN, typename TheA>
-  friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
-
-  void ResetAllocation(pointer new_data, size_type new_capacity,
-                       size_type new_size) {
-    if (storage_.GetIsAllocated()) {
-      Destroy(storage_.GetAllocatedData(),
-              storage_.GetAllocatedData() + size());
-      assert(begin() == storage_.GetAllocatedData());
-      AllocatorTraits::deallocate(*storage_.GetAllocPtr(),
-                                  storage_.GetAllocatedData(),
-                                  storage_.GetAllocatedCapacity());
-    } else {
-      Destroy(storage_.GetInlinedData(), storage_.GetInlinedData() + size());
-    }
-
-    storage_.SetAllocatedData(new_data, new_capacity);
-    storage_.SetAllocatedSize(new_size);
-  }
-
-  template <typename... Args>
-  reference Construct(pointer p, Args&&... args) {
-    absl::allocator_traits<allocator_type>::construct(
-        *storage_.GetAllocPtr(), p, std::forward<Args>(args)...);
-    return *p;
-  }
-
-  template <typename Iterator>
-  void UninitializedCopy(Iterator src, Iterator src_last, pointer dst) {
-    for (; src != src_last; ++dst, ++src) Construct(dst, *src);
-  }
-
-  template <typename... Args>
-  void UninitializedFill(pointer dst, pointer dst_last, const Args&... args) {
-    for (; dst != dst_last; ++dst) Construct(dst, args...);
-  }
-
-  // Destroy [`from`, `to`) in place.
-  void Destroy(pointer from, pointer to) {
-    for (pointer cur = from; cur != to; ++cur) {
-      absl::allocator_traits<allocator_type>::destroy(*storage_.GetAllocPtr(),
-                                                      cur);
-    }
-#if !defined(NDEBUG)
-    // Overwrite unused memory with `0xab` so we can catch uninitialized usage.
-    // Cast to `void*` to tell the compiler that we don't care that we might be
-    // scribbling on a vtable pointer.
-    if (from != to) {
-      auto len = sizeof(value_type) * std::distance(from, to);
-      std::memset(reinterpret_cast<void*>(from), 0xab, len);
-    }
-#endif  // !defined(NDEBUG)
-  }
-
-  // Enlarge the underlying representation so we can store `size_ + delta` elems
-  // in allocated space. The size is not changed, and any newly added memory is
-  // not initialized.
-  void EnlargeBy(size_type delta) {
-    const size_type s = size();
-    assert(s <= capacity());
-
-    size_type target = (std::max)(static_cast<size_type>(N), s + delta);
-
-    // Compute new capacity by repeatedly doubling current capacity
-    // TODO(psrc): Check and avoid overflow?
-    size_type new_capacity = capacity();
-    while (new_capacity < target) {
-      new_capacity <<= 1;
-    }
-
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s);
-  }
-
-  // Shift all elements from `position` to `end()` by `n` places to the right.
-  // If the vector needs to be enlarged, memory will be allocated.
-  // Returns `iterator`s pointing to the start of the previously-initialized
-  // portion and the start of the uninitialized portion of the created gap.
-  // The number of initialized spots is `pair.second - pair.first`. The number
-  // of raw spots is `n - (pair.second - pair.first)`.
-  //
-  // Updates the size of the InlinedVector internally.
-  std::pair<iterator, iterator> ShiftRight(const_iterator position,
-                                           size_type n) {
-    iterator start_used = const_cast<iterator>(position);
-    iterator start_raw = const_cast<iterator>(position);
-    size_type s = size();
-    size_type required_size = s + n;
-
-    if (required_size > capacity()) {
-      // Compute new capacity by repeatedly doubling current capacity
-      size_type new_capacity = capacity();
-      while (new_capacity < required_size) {
-        new_capacity <<= 1;
-      }
-      // Move everyone into the new allocation, leaving a gap of `n` for the
-      // requested shift.
-      pointer new_data =
-          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-      size_type index = position - begin();
-      UninitializedCopy(std::make_move_iterator(data()),
-                        std::make_move_iterator(data() + index), new_data);
-      UninitializedCopy(std::make_move_iterator(data() + index),
-                        std::make_move_iterator(data() + s),
-                        new_data + index + n);
-      ResetAllocation(new_data, new_capacity, s);
-
-      // New allocation means our iterator is invalid, so we'll recalculate.
-      // Since the entire gap is in new space, there's no used space to reuse.
-      start_raw = begin() + index;
-      start_used = start_raw;
-    } else {
-      // If we had enough space, it's a two-part move. Elements going into
-      // previously-unoccupied space need an `UninitializedCopy()`. Elements
-      // going into a previously-occupied space are just a `std::move()`.
-      iterator pos = const_cast<iterator>(position);
-      iterator raw_space = end();
-      size_type slots_in_used_space = raw_space - pos;
-      size_type new_elements_in_used_space = (std::min)(n, slots_in_used_space);
-      size_type new_elements_in_raw_space = n - new_elements_in_used_space;
-      size_type old_elements_in_used_space =
-          slots_in_used_space - new_elements_in_used_space;
-
-      UninitializedCopy(
-          std::make_move_iterator(pos + old_elements_in_used_space),
-          std::make_move_iterator(raw_space),
-          raw_space + new_elements_in_raw_space);
-      std::move_backward(pos, pos + old_elements_in_used_space, raw_space);
-
-      // If the gap is entirely in raw space, the used space starts where the
-      // raw space starts, leaving no elements in used space. If the gap is
-      // entirely in used space, the raw space starts at the end of the gap,
-      // leaving all elements accounted for within the used space.
-      start_used = pos;
-      start_raw = pos + new_elements_in_used_space;
-    }
-    storage_.AddSize(n);
-    return std::make_pair(start_used, start_raw);
-  }
-
-  template <typename... Args>
-  reference GrowAndEmplaceBack(Args&&... args) {
-    assert(size() == capacity());
-    const size_type s = size();
-
-    size_type new_capacity = 2 * capacity();
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    reference new_element =
-        Construct(new_data + s, std::forward<Args>(args)...);
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s + 1);
-
-    return new_element;
-  }
-
-  iterator InsertWithCount(const_iterator position, size_type n,
-                           const_reference v) {
-    assert(position >= begin() && position <= end());
-    if (ABSL_PREDICT_FALSE(n == 0)) return const_cast<iterator>(position);
-
-    value_type copy = v;
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    std::fill(it_pair.first, it_pair.second, copy);
-    UninitializedFill(it_pair.second, it_pair.first + n, copy);
-
-    return it_pair.first;
-  }
-
-  template <typename ForwardIt>
-  iterator InsertWithForwardRange(const_iterator position, ForwardIt first,
-                                  ForwardIt last) {
-    static_assert(absl::inlined_vector_internal::IsAtLeastForwardIterator<
-                      ForwardIt>::value,
-                  "");
-    assert(position >= begin() && position <= end());
-
-    if (ABSL_PREDICT_FALSE(first == last))
-      return const_cast<iterator>(position);
-
-    auto n = std::distance(first, last);
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    size_type used_spots = it_pair.second - it_pair.first;
-    auto open_spot = std::next(first, used_spots);
-    std::copy(first, open_spot, it_pair.first);
-    UninitializedCopy(open_spot, last, it_pair.second);
-    return it_pair.first;
-  }
-
-  void SwapImpl(InlinedVector& other) {
     using std::swap;

     if (ABSL_PREDICT_FALSE(this == std::addressof(other))) {
       return;
     }

     bool is_allocated = storage_.GetIsAllocated();
     bool other_is_allocated = other.storage_.GetIsAllocated();
@@ -1132,6 +914,127 @@ class InlinedVector {
     assert(a->size() == b_size);
   }

+ private:
+  template <typename H, typename TheT, size_t TheN, typename TheA>
+  friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
+
+  void ResetAllocation(pointer new_data, size_type new_capacity,
+                       size_type new_size) {
+    if (storage_.GetIsAllocated()) {
+      Destroy(storage_.GetAllocatedData(),
+              storage_.GetAllocatedData() + size());
+      assert(begin() == storage_.GetAllocatedData());
+      AllocatorTraits::deallocate(*storage_.GetAllocPtr(),
+                                  storage_.GetAllocatedData(),
+                                  storage_.GetAllocatedCapacity());
+    } else {
+      Destroy(storage_.GetInlinedData(), storage_.GetInlinedData() + size());
+    }
+
+    storage_.SetAllocatedData(new_data, new_capacity);
+    storage_.SetAllocatedSize(new_size);
+  }
+
+  template <typename... Args>
+  reference Construct(pointer p, Args&&... args) {
+    absl::allocator_traits<allocator_type>::construct(
+        *storage_.GetAllocPtr(), p, std::forward<Args>(args)...);
+    return *p;
+  }
+
+  template <typename Iterator>
+  void UninitializedCopy(Iterator src, Iterator src_last, pointer dst) {
+    for (; src != src_last; ++dst, ++src) Construct(dst, *src);
+  }
+
+  template <typename... Args>
+  void UninitializedFill(pointer dst, pointer dst_last, const Args&... args) {
+    for (; dst != dst_last; ++dst) Construct(dst, args...);
+  }
+
+  // Destroy [`from`, `to`) in place.
+  void Destroy(pointer from, pointer to) {
+    for (pointer cur = from; cur != to; ++cur) {
+      absl::allocator_traits<allocator_type>::destroy(*storage_.GetAllocPtr(),
+                                                      cur);
+    }
+#if !defined(NDEBUG)
+    // Overwrite unused memory with `0xab` so we can catch uninitialized usage.
+    // Cast to `void*` to tell the compiler that we don't care that we might be
+    // scribbling on a vtable pointer.
+    if (from != to) {
+      auto len = sizeof(value_type) * std::distance(from, to);
+      std::memset(reinterpret_cast<void*>(from), 0xab, len);
+    }
+#endif  // !defined(NDEBUG)
+  }
+
+  // Shift all elements from `position` to `end()` by `n` places to the right.
+  // If the vector needs to be enlarged, memory will be allocated.
+  // Returns `iterator`s pointing to the start of the previously-initialized
+  // portion and the start of the uninitialized portion of the created gap.
+  // The number of initialized spots is `pair.second - pair.first`. The number
+  // of raw spots is `n - (pair.second - pair.first)`.
+  //
+  // Updates the size of the InlinedVector internally.
+  std::pair<iterator, iterator> ShiftRight(const_iterator position,
+                                           size_type n) {
+    iterator start_used = const_cast<iterator>(position);
+    iterator start_raw = const_cast<iterator>(position);
+    size_type s = size();
+    size_type required_size = s + n;
+
+    if (required_size > capacity()) {
+      // Compute new capacity by repeatedly doubling current capacity
+      size_type new_capacity = capacity();
+      while (new_capacity < required_size) {
+        new_capacity <<= 1;
+      }
+      // Move everyone into the new allocation, leaving a gap of `n` for the
+      // requested shift.
+      pointer new_data =
+          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+      size_type index = position - begin();
+      UninitializedCopy(std::make_move_iterator(data()),
+                        std::make_move_iterator(data() + index), new_data);
+      UninitializedCopy(std::make_move_iterator(data() + index),
+                        std::make_move_iterator(data() + s),
+                        new_data + index + n);
+      ResetAllocation(new_data, new_capacity, s);
+
+      // New allocation means our iterator is invalid, so we'll recalculate.
+      // Since the entire gap is in new space, there's no used space to reuse.
+      start_raw = begin() + index;
+      start_used = start_raw;
+    } else {
+      // If we had enough space, it's a two-part move. Elements going into
+      // previously-unoccupied space need an `UninitializedCopy()`. Elements
+      // going into a previously-occupied space are just a `std::move()`.
+      iterator pos = const_cast<iterator>(position);
+      iterator raw_space = end();
+      size_type slots_in_used_space = raw_space - pos;
+      size_type new_elements_in_used_space = (std::min)(n, slots_in_used_space);
+      size_type new_elements_in_raw_space = n - new_elements_in_used_space;
+      size_type old_elements_in_used_space =
+          slots_in_used_space - new_elements_in_used_space;
+
+      UninitializedCopy(
+          std::make_move_iterator(pos + old_elements_in_used_space),
+          std::make_move_iterator(raw_space),
+          raw_space + new_elements_in_raw_space);
+      std::move_backward(pos, pos + old_elements_in_used_space, raw_space);
+
+      // If the gap is entirely in raw space, the used space starts where the
+      // raw space starts, leaving no elements in used space. If the gap is
+      // entirely in used space, the raw space starts at the end of the gap,
+      // leaving all elements accounted for within the used space.
+      start_used = pos;
+      start_raw = pos + new_elements_in_used_space;
+    }
+    storage_.AddSize(n);
+    return std::make_pair(start_used, start_raw);
+  }
+
   Storage storage_;
 };
absl/container/inlined_vector_benchmark.cc

@@ -599,6 +599,146 @@ void BM_AssignFromMove(benchmark::State& state) {
 ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
 ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);

+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        vec->resize(ToSize, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, other_vec.begin(), other_vec.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos, pos + 1);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
 template <typename T, size_t FromSize>
 void BM_Clear(benchmark::State& state) {
   BatchedBenchmark<T>(
@@ -609,4 +749,56 @@ void BM_Clear(benchmark::State& state) {
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
 ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);

+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(ToCapacity);
+        vec->reserve(FromCapacity);
+      },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        using std::swap;
+        benchmark::DoNotOptimize(vector_batch[i]);
+        swap(*vec, vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
 }  // namespace
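Note: every benchmark above shares one shape: prepare_vec resets a whole batch outside the timed region, test_vec performs the single operation under test, and benchmark::DoNotOptimize() keeps the compiler from discarding inputs or results. A minimal standalone version of that shape (BM_Example is an illustrative name, not part of this diff):

#include "benchmark/benchmark.h"

static void BM_Example(benchmark::State& state) {
  for (auto _ : state) {
    int x = 42;
    benchmark::DoNotOptimize(x);  // defeat constant-folding of the "work"
  }
}
BENCHMARK(BM_Example);
BENCHMARK_MAIN();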
absl/container/inlined_vector_exception_safety_test.cc

@@ -12,7 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <array>
 #include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <utility>

 #include "gtest/gtest.h"
 #include "absl/base/internal/exception_safety_testing.h"
@@ -81,6 +85,24 @@ using OneSizeTestParams =
     TestParams<ThrowAllocMovableThrowerVec, kLargeSize>,
     TestParams<ThrowAllocMovableThrowerVec, kSmallSize>>;

+using TwoSizeTestParams = ::testing::Types<
+    TestParams<ThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kSmallSize>>;
+
 template <typename>
 struct NoSizeTest : ::testing::Test {};
 TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams);
@@ -89,6 +111,25 @@ template <typename>
 struct OneSizeTest : ::testing::Test {};
 TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams);

+template <typename>
+struct TwoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams);
+
+template <typename VecT>
+bool InlinedVectorInvariants(VecT* vec) {
+  if (*vec != *vec) return false;
+  if (vec->size() > vec->capacity()) return false;
+  if (vec->size() > vec->max_size()) return false;
+  if (vec->capacity() > vec->max_size()) return false;
+  if (vec->data() != std::addressof(vec->at(0))) return false;
+  if (vec->data() != vec->begin()) return false;
+  if (*vec->data() != *vec->begin()) return false;
+  if (vec->begin() > vec->end()) return false;
+  if ((vec->end() - vec->begin()) != vec->size()) return false;
+  if (std::distance(vec->begin(), vec->end()) != vec->size()) return false;
+  return true;
+}
+
 // Function that always returns false is correct, but refactoring is required
 // for clarity. It's needed to express that, as a contract, certain operations
 // should not throw at all. Execution of this function means an exception was
@@ -179,6 +220,45 @@ TYPED_TEST(OneSizeTest, MoveConstructor) {
   }
 }

+TYPED_TEST(TwoSizeTest, Assign) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = other_vec;
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = std::move(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    value_type val{};
+    vec->assign(to_size, val);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size));
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    std::array<value_type, to_size> arr{};
+    vec->assign(arr.begin(), arr.end());
+  }));
+}
+
 TYPED_TEST(OneSizeTest, PopBack) {
   using VecT = typename TypeParam::VecT;
   constexpr static auto size = TypeParam::GetSizeAt(0);
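Note: the invariants function further up is the contract half of this pattern; the tester half repeatedly runs the operation while injecting a throw at each successive allocation/construction point, re-checking the contract after every induced failure. A simplified, self-contained illustration of the flow (HoldsUnderExceptions is a hypothetical stand-in, not the real tester API, and it only exercises a single throw):

// Sketch only: the real MakeExceptionSafetyTester() drives the throw points
// itself; here the operation is simply allowed to throw once.
template <typename VecT, typename Operation, typename Contract>
bool HoldsUnderExceptions(VecT initial, Operation op, Contract contract) {
  try {
    op(&initial);
  } catch (...) {
    return contract(&initial);  // basic guarantee: invariants must still hold
  }
  return contract(&initial);    // success path must also preserve invariants
}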
@@ -205,4 +285,17 @@ TYPED_TEST(OneSizeTest, Clear) {
   }));
 }

+TYPED_TEST(OneSizeTest, ShrinkToFit) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->shrink_to_fit();  //
+  }));
+}
+
 }  // namespace
absl/container/inlined_vector_test.cc

@@ -190,6 +190,12 @@ TEST(IntVec, SimpleOps) {
   }
 }

+TEST(IntVec, PopBackNoOverflow) {
+  IntVec v = {1};
+  v.pop_back();
+  EXPECT_EQ(v.size(), 0);
+}
+
 TEST(IntVec, AtThrows) {
   IntVec v = {1, 2, 3};
   EXPECT_EQ(v.at(2), 3);
absl/container/internal/compressed_tuple.h

@@ -32,6 +32,7 @@
 #ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
 #define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_

+#include <initializer_list>
 #include <tuple>
 #include <type_traits>
 #include <utility>
@@ -75,17 +76,30 @@ constexpr bool IsFinal() {
 #endif
 }

+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl, to make this easy to detect.
+struct uses_inheritance {};
+
 template <typename T>
 constexpr bool ShouldUseBase() {
-  return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+  return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+         !std::is_base_of<uses_inheritance, T>::value;
 }

 // The storage class provides two specializations:
 //   - For empty classes, it stores T as a base class.
 //   - For everything else, it stores T as a member.
-template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+          bool UseBase =
+              ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+          bool UseBase = ShouldUseBase<T>()>
+#endif
 struct Storage {
-  using T = ElemT<D, I>;
   T value;
   constexpr Storage() = default;
   explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
@@ -95,10 +109,8 @@ struct Storage {
   T&& get() && { return std::move(*this).value; }
 };

-template <typename D, size_t I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
-    : ElemT<D, I> {
-  using T = internal_compressed_tuple::ElemT<D, I>;
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
   constexpr Storage() = default;
   explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
   constexpr const T& get() const& { return *this; }
@@ -107,29 +119,54 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
   T&& get() && { return std::move(*this); }
 };

-template <typename D, typename I>
+template <typename D, typename I, bool ShouldAnyUseBase>
 struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;

-template <typename... Ts, size_t... I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
-    CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+    CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
     // We use the dummy identity function through std::integral_constant to
     // convince MSVC of accepting and expanding I in that context. Without it
     // you would get:
     //   error C3548: 'I': parameter pack cannot be used in this context
-    : Storage<CompressedTuple<Ts...>,
-              std::integral_constant<size_t, I>::value>... {
+    : uses_inheritance,
+      Storage<Ts, std::integral_constant<size_t, I>::value>... {
   constexpr CompressedTupleImpl() = default;
   explicit constexpr CompressedTupleImpl(Ts&&... args)
-      : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+      : Storage<Ts, I>(absl::forward<Ts>(args))... {}
   friend CompressedTuple<Ts...>;
 };

+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+    CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+    // We use the dummy identity function as above...
+    : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+  constexpr CompressedTupleImpl() = default;
+  explicit constexpr CompressedTupleImpl(Ts&&... args)
+      : Storage<Ts, I, false>(absl::forward<Ts>(args))... {}
+  friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+  return decltype(
+      Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
 }  // namespace internal_compressed_tuple

 // Helper class to perform the Empty Base Class Optimization.
 // Ts can contain classes and non-classes, empty or not. For the ones that
 // are empty classes, we perform the CompressedTuple. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
 //
 // To access the members, use member .get<N>() function.
 //
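Note: the Or() pair added above is an overload-resolution trick, not a runtime function. The false_type overload is viable only when every element of the braced list is exactly std::false_type; any true_type forces the fallback to the bool list. The trick in isolation, as a standalone sketch:

#include <initializer_list>
#include <type_traits>

// Re-declared here only for illustration; these mirror the two lines in the
// diff and are never defined or called at runtime (decltype is unevaluated).
std::false_type Or(std::initializer_list<std::false_type>);
std::true_type Or(std::initializer_list<bool>);

static_assert(decltype(Or({std::false_type(), std::false_type()}))::value ==
              false, "all false stays false");
static_assert(decltype(Or({std::false_type(), std::true_type()}))::value ==
              true, "any true flips to true");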
@@ -145,7 +182,8 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
 template <typename... Ts>
 class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
     : private internal_compressed_tuple::CompressedTupleImpl<
-          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+          CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+          internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
  private:
   template <int I>
   using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
@@ -157,24 +195,24 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple

   template <int I>
   ElemT<I>& get() & {
-    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
   }

   template <int I>
   constexpr const ElemT<I>& get() const& {
-    return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+    return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
   }

   template <int I>
   ElemT<I>&& get() && {
     return std::move(*this)
-        .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
   }

   template <int I>
   constexpr const ElemT<I>&& get() const&& {
     return absl::move(*this)
-        .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+        .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
   }
 };
absl/container/internal/compressed_tuple_test.cc

@@ -22,10 +22,8 @@
 #include "absl/memory/memory.h"
 #include "absl/utility/utility.h"

-namespace absl {
-namespace container_internal {
-namespace {
-
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
 enum class CallType { kConstRef, kConstMove };

 template <int>
@@ -45,6 +43,10 @@ struct TwoValues {
   U value2;
 };

+namespace absl {
+namespace container_internal {
+namespace {
+
 TEST(CompressedTupleTest, Sizeof) {
   EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
   EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
@@ -120,9 +122,14 @@ TEST(CompressedTupleTest, Nested) {
   EXPECT_EQ(4 * sizeof(char),
             sizeof(CompressedTuple<CompressedTuple<char, char>,
                                    CompressedTuple<char, char>>));
-  EXPECT_TRUE(
-      (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
-                                     CompressedTuple<Empty<1>>>>::value));
+  EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+  // Make sure everything still works when things are nested.
+  struct CT_Empty : CompressedTuple<Empty<0>> {};
+  CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+  auto contained = nested_empty.get<0>();
+  auto nested = nested_empty.get<1>().get<0>();
+  EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
 }

 TEST(CompressedTupleTest, Reference) {
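Note: the point of CompressedTuple, visible in the tests above, is that empty classes are stored as base classes (the Empty Base Class Optimization), so they cost no space next to a real member. A standalone sketch mirroring the test's own assertions (Empty1/Empty2 are local illustrative types):

#include <type_traits>

#include "absl/container/internal/compressed_tuple.h"

struct Empty1 {};
struct Empty2 {};

static_assert(sizeof(absl::container_internal::CompressedTuple<int, Empty1>) ==
                  sizeof(int),
              "EBCO collapses the empty element");
static_assert(std::is_empty<absl::container_internal::CompressedTuple<
                  Empty1, Empty2>>::value,
              "a tuple of distinct empties is itself empty");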
absl/container/internal/inlined_vector.h

@@ -25,6 +25,7 @@
+#include "absl/container/internal/compressed_tuple.h"
 #include "absl/memory/memory.h"
 #include "absl/meta/type_traits.h"
 #include "absl/types/span.h"

 namespace absl {
 namespace inlined_vector_internal {
@@ -78,6 +79,14 @@ void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
   }
 }

+template <typename ValueType, typename ValueAdapter, typename SizeType>
+void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
+                    SizeType assign_size) {
+  for (SizeType i = 0; i < assign_size; ++i) {
+    values_ptr->AssignNext(assign_first + i);
+  }
+}
+
 template <typename AllocatorType>
 struct StorageView {
   using pointer = typename AllocatorType::pointer;
@@ -101,6 +110,11 @@ class IteratorValueAdapter {
     ++it_;
   }

+  void AssignNext(pointer assign_at) {
+    *assign_at = *it_;
+    ++it_;
+  }
+
  private:
   Iterator it_;
 };
@@ -119,6 +133,8 @@ class CopyValueAdapter {
     AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
   }

+  void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
+
  private:
   const_pointer ptr_;
 };
@@ -135,6 +151,44 @@ class DefaultValueAdapter {
   void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
     AllocatorTraits::construct(*alloc_ptr, construct_at);
   }
+
+  void AssignNext(pointer assign_at) { *assign_at = value_type(); }
 };

+template <typename AllocatorType>
+class AllocationTransaction {
+  using value_type = typename AllocatorType::value_type;
+  using pointer = typename AllocatorType::pointer;
+  using size_type = typename AllocatorType::size_type;
+  using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+  explicit AllocationTransaction(AllocatorType* alloc_ptr)
+      : alloc_data_(*alloc_ptr, nullptr) {}
+
+  AllocationTransaction(const AllocationTransaction&) = delete;
+  void operator=(const AllocationTransaction&) = delete;
+
+  AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+  pointer& GetData() { return alloc_data_.template get<1>(); }
+  size_type& GetCapacity() { return capacity_; }
+
+  bool DidAllocate() { return GetData() != nullptr; }
+  pointer Allocate(size_type capacity) {
+    GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+    GetCapacity() = capacity;
+    return GetData();
+  }
+
+  ~AllocationTransaction() {
+    if (DidAllocate()) {
+      AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+    }
+  }
+
+ private:
+  container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+  size_type capacity_ = 0;
+};
+
 template <typename T, size_t N, typename A>
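Note: AllocationTransaction is the RAII piece behind the exception-safe Assign()/ShrinkToFit() further down; a buffer allocated through it is released by the destructor unless the vector commits it via AcquireAllocation(). A minimal standalone sketch of the same idea (this is an illustration under stated assumptions, not Abseil's code; Commit() plays the role of AcquireAllocation()):

#include <memory>

// The transaction owns the buffer until the caller explicitly commits it, so
// a throwing element constructor cannot leak the new allocation.
template <typename Alloc>
class AllocationTransactionSketch {
  using Traits = std::allocator_traits<Alloc>;
  using pointer = typename Traits::pointer;

 public:
  explicit AllocationTransactionSketch(Alloc* a) : alloc_(a) {}
  ~AllocationTransactionSketch() {
    if (data_ != nullptr) Traits::deallocate(*alloc_, data_, capacity_);
  }
  pointer Allocate(typename Traits::size_type n) {
    data_ = Traits::allocate(*alloc_, n);
    capacity_ = n;
    return data_;
  }
  pointer Commit() {  // caller takes ownership; destructor becomes a no-op
    pointer out = data_;
    data_ = nullptr;
    return out;
  }

 private:
  Alloc* alloc_;
  pointer data_ = nullptr;
  typename Traits::size_type capacity_ = 0;
};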
@@ -167,6 +221,9 @@ class Storage {
   using DefaultValueAdapter =
       inlined_vector_internal::DefaultValueAdapter<allocator_type>;

+  using AllocationTransaction =
+      inlined_vector_internal::AllocationTransaction<allocator_type>;
+
   Storage() : metadata_() {}

   explicit Storage(const allocator_type& alloc)
@@ -215,19 +272,48 @@ class Storage {

   void SetIsAllocated() { GetSizeAndIsAllocated() |= 1; }

+  void UnsetIsAllocated() {
+    SetIsAllocated();
+    GetSizeAndIsAllocated() -= 1;
+  }
+
   void SetAllocatedSize(size_type size) {
     GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
   }

   void SetInlinedSize(size_type size) { GetSizeAndIsAllocated() = size << 1; }

+  void SetSize(size_type size) {
+    GetSizeAndIsAllocated() =
+        (size << 1) | static_cast<size_type>(GetIsAllocated());
+  }
+
   void AddSize(size_type count) { GetSizeAndIsAllocated() += count << 1; }

+  void SubtractSize(size_type count) {
+    assert(count <= GetSize());
+    GetSizeAndIsAllocated() -= count << 1;
+  }
+
   void SetAllocatedData(pointer data, size_type capacity) {
     data_.allocated.allocated_data = data;
     data_.allocated.allocated_capacity = capacity;
   }

+  void DeallocateIfAllocated() {
+    if (GetIsAllocated()) {
+      AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+                                  GetAllocatedCapacity());
+    }
+  }
+
+  void AcquireAllocation(AllocationTransaction* allocation_tx_ptr) {
+    SetAllocatedData(allocation_tx_ptr->GetData(),
+                     allocation_tx_ptr->GetCapacity());
+    allocation_tx_ptr->GetData() = nullptr;
+    allocation_tx_ptr->GetCapacity() = 0;
+  }
+
   void SwapSizeAndIsAllocated(Storage* other) {
     using std::swap;
     swap(GetSizeAndIsAllocated(), other->GetSizeAndIsAllocated());
@@ -238,11 +324,11 @@ class Storage {
     swap(data_.allocated, other->data_.allocated);
   }

-  void MemcpyContents(const Storage& other) {
-    assert(IsMemcpyOk::value);
+  void MemcpyFrom(const Storage& other_storage) {
+    assert(IsMemcpyOk::value || other_storage.GetIsAllocated());

-    GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
-    data_ = other.data_;
+    GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+    data_ = other_storage.data_;
   }

   void DestroyAndDeallocate();
@@ -250,6 +336,11 @@ class Storage {
   template <typename ValueAdapter>
   void Initialize(ValueAdapter values, size_type new_size);

+  template <typename ValueAdapter>
+  void Assign(ValueAdapter values, size_type new_size);
+
+  void ShrinkToFit();
+
  private:
   size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
@@ -282,15 +373,10 @@ class Storage {

 template <typename T, size_t N, typename A>
 void Storage<T, N, A>::DestroyAndDeallocate() {
-  StorageView storage_view = MakeStorageView();
-
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                           storage_view.size);
-
-  if (GetIsAllocated()) {
-    AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
-                                storage_view.capacity);
-  }
+  inlined_vector_internal::DestroyElements(
+      GetAllocPtr(), (GetIsAllocated() ? GetAllocatedData() : GetInlinedData()),
+      GetSize());
+  DeallocateIfAllocated();
 }

 template <typename T, size_t N, typename A>
@@ -323,6 +409,91 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
   AddSize(new_size);
 }

+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+  StorageView storage_view = MakeStorageView();
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  absl::Span<value_type> assign_loop;
+  absl::Span<value_type> construct_loop;
+  absl::Span<value_type> destroy_loop;
+
+  if (new_size > storage_view.capacity) {
+    construct_loop = {allocation_tx.Allocate(new_size), new_size};
+    destroy_loop = {storage_view.data, storage_view.size};
+  } else if (new_size > storage_view.size) {
+    assign_loop = {storage_view.data, storage_view.size};
+    construct_loop = {storage_view.data + storage_view.size,
+                      new_size - storage_view.size};
+  } else {
+    assign_loop = {storage_view.data, new_size};
+    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+  }
+
+  inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+                                          assign_loop.size());
+  inlined_vector_internal::ConstructElements(
+      GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+                                           destroy_loop.size());
+
+  if (allocation_tx.DidAllocate()) {
+    DeallocateIfAllocated();
+    AcquireAllocation(&allocation_tx);
+    SetIsAllocated();
+  }
+
+  SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+  // May only be called on allocated instances!
+  assert(GetIsAllocated());
+
+  StorageView storage_view = {GetAllocatedData(), GetSize(),
+                              GetAllocatedCapacity()};
+
+  AllocationTransaction allocation_tx(GetAllocPtr());
+
+  IteratorValueAdapter<MoveIterator> move_values(
+      MoveIterator(storage_view.data));
+
+  pointer construct_data;
+
+  if (storage_view.size <= static_cast<size_type>(N)) {
+    construct_data = GetInlinedData();
+  } else if (storage_view.size < GetAllocatedCapacity()) {
+    construct_data = allocation_tx.Allocate(storage_view.size);
+  } else {
+    return;
+  }
+
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+                                               &move_values, storage_view.size);
+  }
+  ABSL_INTERNAL_CATCH_ANY {
+    // Writing to inlined data will trample on the existing state, thus it needs
+    // to be restored when a construction fails.
+    SetAllocatedData(storage_view.data, storage_view.capacity);
+    ABSL_INTERNAL_RETHROW;
+  }
+
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+  AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+                              storage_view.capacity);
+
+  if (allocation_tx.DidAllocate()) {
+    AcquireAllocation(&allocation_tx);
+  } else {
+    UnsetIsAllocated();
+  }
+}
+
 }  // namespace inlined_vector_internal
 }  // namespace absl
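Closing note on Storage::Assign() above: it first classifies the destination into at most two of three disjoint spans (assign over live elements, construct into raw capacity, destroy leftovers) and only commits a fresh allocation at the end, through the transaction. The branch logic in isolation, with hypothetical printout names standing in for the real loops:

#include <cstddef>
#include <cstdio>

// Given old size s, capacity c, and requested size n, mirror Assign's three
// branches (sketch only; the real code operates on absl::Span ranges).
void PartitionForAssign(std::size_t s, std::size_t c, std::size_t n) {
  if (n > c) {
    std::printf("construct [0,%zu) in new allocation; destroy old [0,%zu)\n",
                n, s);
  } else if (n > s) {
    std::printf("assign over [0,%zu); construct [%zu,%zu)\n", s, s, n);
  } else {
    std::printf("assign over [0,%zu); destroy [%zu,%zu)\n", n, n, s);
  }
}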