plf_hive.h
// Copyright (c) 2023, Matthew Bentley (mattreecebentley@gmail.com) www.plflib.org

// zLib license (https://www.zlib.net/zlib_license.html):
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgement in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.


#ifndef PLF_HIVE_H
#define PLF_HIVE_H
#define __cpp_lib_hive


#define PLF_EXCEPTIONS_SUPPORT

#if ((defined(__clang__) || defined(__GNUC__)) && !defined(__EXCEPTIONS)) || (defined(_MSC_VER) && !defined(_CPPUNWIND))
	#undef PLF_EXCEPTIONS_SUPPORT
	#include <exception> // std::terminate
#endif

#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
	// Suppress incorrect (unfixed MSVC bug at warning level 4) warnings re: constant expressions in constexpr-if statements
	#pragma warning ( push )
	#pragma warning ( disable : 4127 )
#endif

#include <algorithm> // std::fill_n, std::sort
#include <cassert> // assert
#include <cstring> // memset, memcpy, size_t
#include <cstdint> // uint_least8_t, uint_least16_t (used for the skipfield type)
#include <limits> // std::numeric_limits
#include <memory> // std::allocator, std::to_address
#include <iterator> // std::bidirectional_iterator_tag, iterator_traits, make_move_iterator, std::distance for range insert
#include <stdexcept> // std::length_error
#include <functional> // std::less
#include <cstddef> // offsetof, used in blank()
#include <type_traits> // std::is_trivially_destructible, enable_if_t, type_identity_t, etc
#include <utility> // std::move
#include <initializer_list>
#include <concepts>
#include <compare> // std::strong_ordering
#include <ranges>


namespace plf
{
	// For getting std:: overloads to match hive iterators specifically:
	template <class T>
	concept hive_iterator_concept = requires { typename T::hive_iterator_tag; };

	#ifndef PLF_FROM_RANGE // To ensure interoperability with other plf lib containers
	#define PLF_FROM_RANGE

	// Until all standard libraries include C++23's std::from_range_t, we include this here so that the ranges-v3 constructor overloads work unambiguously:
	namespace ranges
	{
		struct from_range_t {};
		inline constexpr from_range_t from_range;
	}
	#endif
}



// Overloads for advance etc must be defined here as they are called by range-assign/insert functions - otherwise compiler will call std:: bidirectional overloads:
namespace std
{
	template <plf::hive_iterator_concept it_type, typename distance_type>
	void advance(it_type &it, const distance_type distance)
	{
		it.advance(static_cast<typename iterator_traits<it_type>::difference_type>(distance));
	}



	template <plf::hive_iterator_concept it_type>
	it_type next(it_type it, const typename iterator_traits<it_type>::difference_type distance = 1)
	{
		it.advance(distance);
		return it;
	}



	template <plf::hive_iterator_concept it_type>
	it_type prev(it_type it, const typename iterator_traits<it_type>::difference_type distance = 1)
	{
		it.advance(-distance);
		return it;
	}



	template <plf::hive_iterator_concept it_type>
	typename iterator_traits<it_type>::difference_type distance(const it_type first, const it_type last)
	{
		return first.distance(last);
	}
}
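
// Usage sketch (editorial addition, not part of the original header): these overloads
// mean the familiar std:: names dispatch to hive's skipfield-aware member functions
// instead of the generic one-step-at-a-time bidirectional loops. Assumes <cassert>.
/*
	plf::hive<int> h = {10, 20, 30, 40};	// no erasures yet, so iteration order == insertion order
	auto it = h.begin();
	std::advance(it, 2);	// calls it.advance(2)
	assert(*it == 30);
	assert(std::distance(h.begin(), h.end()) == 4);
*/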



namespace plf
{


struct hive_limits // for use in block_capacity setting/getting functions and constructors
{
	size_t min, max;
	constexpr hive_limits(const size_t minimum, const size_t maximum) noexcept : min(minimum), max(maximum) {}
};
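
// Usage sketch (editorial addition): hive_limits expresses the minimum and maximum
// element capacities permitted for a hive's memory blocks. Limits which are invalid or
// outside block_capacity_hard_limits() cause the constructors below to throw
// std::length_error (or call std::terminate() when exceptions are unavailable).
/*
	plf::hive<int> h(plf::hive_limits(50, 100));	// every block will hold between 50 and 100 elements
	for (int i = 0; i != 300; ++i) h.insert(i);		// forces several blocks to be allocated
*/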



template <class element_type, class allocator_type = std::allocator<element_type> >
class hive : private allocator_type // Empty base class optimisation - inheriting allocator functions
{
	typedef std::conditional_t<(sizeof(element_type) > 10 || alignof(element_type) > 10), uint_least16_t, uint_least8_t> skipfield_type;

public:
	// Standard container typedefs:
	typedef element_type value_type;
	typedef typename std::allocator_traits<allocator_type>::size_type size_type;
	typedef typename std::allocator_traits<allocator_type>::difference_type difference_type;
	typedef element_type & reference;
	typedef const element_type & const_reference;
	typedef typename std::allocator_traits<allocator_type>::pointer pointer;
	typedef typename std::allocator_traits<allocator_type>::const_pointer const_pointer;

	// Iterator forward declarations:
	template <bool is_const> class hive_iterator;
	typedef hive_iterator<false> iterator;
	typedef hive_iterator<true>  const_iterator;
	friend iterator;
	friend const_iterator;

	template <bool is_const_r> class hive_reverse_iterator;
	typedef hive_reverse_iterator<false> reverse_iterator;
	typedef hive_reverse_iterator<true>  const_reverse_iterator;
	friend reverse_iterator;
	friend const_reverse_iterator;



private:

	// The element as allocated in memory needs to be at least 2 * sizeof(skipfield_type) wide in order to support free-list indexes in erased-element memory space, so:
	// make the size of this struct the largest of alignof(T), sizeof(T) and 2 * sizeof(skipfield_type) (the latter is only relevant for type char/uchar), and
	// make the alignment alignof(T).
	// This type is used mainly for correct pointer arithmetic while iterating over elements in memory.
	struct alignas(alignof(element_type)) aligned_element_struct
	{
		// Using char, as sizeof(char) is always guaranteed to be 1 byte regardless of the number of bits in a byte on a given computer, whereas, for example, uint8_t would fail on machines where there are more than 8 bits in a byte, eg. Texas Instruments C54x DSPs.
		char data[
			(sizeof(element_type) < (sizeof(skipfield_type) * 2)) ?
			((sizeof(skipfield_type) * 2) < alignof(element_type) ? alignof(element_type) : (sizeof(skipfield_type) * 2)) :
			((sizeof(element_type) < alignof(element_type)) ? alignof(element_type) : sizeof(element_type))
		];
	};
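
	// Worked example (editorial addition): for element_type = char (sizeof == 1, alignof == 1)
	// the skipfield type is uint_least8_t, so 2 * sizeof(skipfield_type) == 2 wins and data[2] is
	// used - just enough for the two free-list indexes stored in erased-element memory. For
	// element_type = double (sizeof == 8, alignof == 8), data[8] is used, making this struct
	// identical to the element type in both size and alignment.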


	// We combine the allocation of elements and skipfield into one allocation to save performance. This memory must be allocated as an aligned type with the same alignment as T in order for the elements to align with memory boundaries correctly (which won't happen if we allocate as char or uint8_t). But the larger the sizeof of the type we use for allocation, the greater the chance of creating a lot of unused memory in the skipfield portion of the allocated block. So we create a type whose size equals alignof(T), as in most cases alignof(T) < sizeof(T). If alignof(T) >= sizeof(T) this makes no difference.
	struct alignas(alignof(element_type)) aligned_allocation_struct
	{
		char data[alignof(element_type)];
	};


	// Calculate the capacity of a group's elements+skipfield memory block when expressed in multiples of the value_type's alignment (rounding up).
	static size_type get_aligned_block_capacity(const skipfield_type elements_per_group) noexcept
	{
		return ((elements_per_group * (sizeof(aligned_element_struct) + sizeof(skipfield_type))) + sizeof(skipfield_type) + sizeof(aligned_allocation_struct) - 1) / sizeof(aligned_allocation_struct);
	}
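
	// Worked example (editorial addition), assuming element_type = double on a typical platform
	// (sizeof(aligned_element_struct) == 8, sizeof(skipfield_type) == 1, sizeof(aligned_allocation_struct) == 8):
	// for a 100-element group the block needs 100 * (8 + 1) + 1 = 901 bytes (the final + 1 being the
	// extra unused skipfield node), which the formula rounds up to (901 + 7) / 8 = 113 allocation units.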


	// To enable conversion when allocator supplies non-raw pointers:
	template <class destination_pointer_type, class source_pointer_type>
	static constexpr destination_pointer_type pointer_cast(const source_pointer_type source_pointer) noexcept
	{
		if constexpr (std::is_trivial<destination_pointer_type>::value)
		{
			return reinterpret_cast<destination_pointer_type>(std::to_address(source_pointer));
		}
		else
		{
			return destination_pointer_type(std::to_address(source_pointer));
		}
	}



	// forward declarations for typedefs below
	struct group;
	struct item_index_tuple; // for use in sort()


	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<aligned_element_struct> aligned_allocator_type;
	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<group> group_allocator_type;
	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<skipfield_type> skipfield_allocator_type;
	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<aligned_allocation_struct> aligned_struct_allocator_type;
	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<item_index_tuple> tuple_allocator_type;
	typedef typename std::allocator_traits<allocator_type>::template rebind_alloc<unsigned char> uchar_allocator_type;

	typedef typename std::allocator_traits<aligned_allocator_type>::pointer aligned_pointer_type; // pointer to the (potentially overaligned) element type, not the original element type
	typedef typename std::allocator_traits<group_allocator_type>::pointer group_pointer_type;
	typedef typename std::allocator_traits<skipfield_allocator_type>::pointer skipfield_pointer_type;
	typedef typename std::allocator_traits<aligned_struct_allocator_type>::pointer aligned_struct_pointer_type;
	typedef typename std::allocator_traits<tuple_allocator_type>::pointer tuple_pointer_type;


	// group == element memory block + skipfield + block metadata
	struct group
	{
		skipfield_pointer_type skipfield; // Skipfield storage. The element and skipfield arrays are allocated contiguously, in a single allocation, in this implementation, hence the skipfield pointer also functions as a 'one-past-end' pointer for the elements array. There will always be one additional skipfield node allocated compared to the number of elements. This is to ensure a faster ++ iterator operation (fewer checks are required when this is present). The extra node is unused and always zero, but checked, and not having it will result in out-of-bounds memory errors. This is present before elements in the group struct as it is referenced constantly by the ++ operator, hence having it first results in a minor performance increase.
		group_pointer_type next_group; // Next group in the linked list of all groups. nullptr if no following group. 2nd in struct because it is so frequently used during iteration.
		aligned_pointer_type const elements; // Element storage.
		group_pointer_type previous_group; // Previous group in the linked list of all groups. nullptr if no preceding group.
		skipfield_type free_list_head; // The index of the last erased element in the group. The last erased element will, in turn, contain the number of the index of the next erased element, and so on. If this is == maximum skipfield_type value then free_list is empty ie. no erasures have occurred in the group (or if they have, the erased locations have subsequently been reused via insert/emplace/assign).
		const skipfield_type capacity; // The element capacity of this particular group - can also be calculated from reinterpret_cast<aligned_pointer_type>(group->skipfield) - group->elements, however this space is effectively free due to struct padding and the sizeof(skipfield_type), and calculating it once is faster in benchmarking.
		skipfield_type size; // The total number of active elements in group - changes with insert and erase commands - used to check for empty group in erase function, as an indication to remove the group. Also used in combination with capacity to check if group is full, which is used in the next/previous/advance/distance overloads, and range-erase.
		group_pointer_type erasures_list_next_group, erasures_list_previous_group; // The next and previous groups in the list of groups with erasures ie. with active erased-element free lists. nullptr if no next or previous group.
		size_type group_number; // Used for comparison (> < >= <= <=>) iterator operators (used by distance function and user).



		group(aligned_struct_allocator_type &aligned_struct_allocator, const skipfield_type elements_per_group, group_pointer_type const previous):
			next_group(nullptr),
			elements(pointer_cast<aligned_pointer_type>(std::allocator_traits<aligned_struct_allocator_type>::allocate(aligned_struct_allocator, get_aligned_block_capacity(elements_per_group), (previous == nullptr) ? 0 : previous->elements))),
			previous_group(previous),
			free_list_head(std::numeric_limits<skipfield_type>::max()),
			capacity(elements_per_group),
			size(1),
			erasures_list_next_group(nullptr),
			erasures_list_previous_group(nullptr),
			group_number((previous == nullptr) ? 0 : previous->group_number + 1u)
		{
			skipfield = pointer_cast<skipfield_pointer_type>(elements + elements_per_group);
			std::memset(static_cast<void *>(std::to_address(skipfield)), 0, sizeof(skipfield_type) * (static_cast<size_type>(elements_per_group) + 1u));
		}



		void reset(const skipfield_type increment, const group_pointer_type next, const group_pointer_type previous, const size_type group_num) noexcept
		{
			next_group = next;
			free_list_head = std::numeric_limits<skipfield_type>::max();
			previous_group = previous;
			size = increment;
			erasures_list_next_group = nullptr;
			erasures_list_previous_group = nullptr;
			group_number = group_num;

			std::memset(static_cast<void *>(std::to_address(skipfield)), 0, sizeof(skipfield_type) * static_cast<size_type>(capacity)); // capacity + 1 is not necessary here as the final skipfield node is never written to after initialization
		}
	};
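
	// Sketch of the skipfield encoding (editorial addition, inferred from the iteration and
	// update_skipblock code in this header): the skipfield is a jump-counting array with one node
	// per element slot. A node of 0 means 'occupied'; a skipblock of n contiguous erased slots
	// stores n in both its first and last nodes, so iteration can hop the whole run in a single
	// step from either direction (interior nodes are not read during iteration). For example, with
	// slots 2-4 erased:
	//
	//	skipfield: [0] [0] [3] [.] [3] [0]
	//
	// ++iterator at slot 1 reads the following node (3) and lands directly on slot 5.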



	// Hive member variables:

	iterator end_iterator, begin_iterator;
	group_pointer_type erasure_groups_head, // Head of doubly-linked list of groups which have erased-element memory locations available for re-use
						unused_groups_head; // Head of singly-linked list of reserved groups retained by erase()/clear() or created by reserve()
	size_type total_size, total_capacity;
	skipfield_type min_block_capacity, max_block_capacity;

	group_allocator_type group_allocator;
	aligned_struct_allocator_type aligned_struct_allocator;
	skipfield_allocator_type skipfield_allocator;
	tuple_allocator_type tuple_allocator;



	// Adaptive minimum based around aligned size, sizeof(group) and sizeof(hive):
	static constexpr skipfield_type default_min_block_capacity() noexcept
	{
		constexpr skipfield_type adaptive_size = static_cast<skipfield_type>(((sizeof(hive) + sizeof(group)) * 2) / sizeof(aligned_element_struct));
		constexpr skipfield_type max_block_capacity = default_max_block_capacity(); // Necessary to check against in situations with > 64bit pointer sizes and small sizeof(T)
		return (8 > adaptive_size) ? 8 : (adaptive_size > max_block_capacity) ? max_block_capacity : adaptive_size;
	}



	// Adaptive maximum based on numeric_limits and the best outcome from multiple benchmarks (on balance) in terms of memory usage and performance:
	static constexpr skipfield_type default_max_block_capacity() noexcept
	{
		return static_cast<skipfield_type>((std::numeric_limits<skipfield_type>::max() > 8192u) ? 8192u : std::numeric_limits<skipfield_type>::max());
	}



	static constexpr hive_limits default_block_capacity_limits() noexcept
	{
		return hive_limits(static_cast<size_t>(default_min_block_capacity()), static_cast<size_t>(default_max_block_capacity()));
	}
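
	// Worked example (editorial addition; the exact figures are platform-dependent): if
	// sizeof(hive) + sizeof(group) came to 160 bytes on a 64-bit platform, a 4-byte element type
	// would give adaptive_size == (160 * 2) / 4 == 80, so blocks start at 80 elements, while a
	// 64-byte element type would give 5, which is below the floor of 8, so 8 is used. The default
	// maximum is 8192, except where the skipfield type cannot represent it (eg. 255 for uint_least8_t).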



	void check_capacities_conformance(const hive_limits capacities) const
	{
		constexpr hive_limits hard_capacities = block_capacity_hard_limits();

		if (capacities.min < hard_capacities.min || capacities.min > capacities.max || capacities.max > hard_capacities.max)
		{
			#ifdef PLF_EXCEPTIONS_SUPPORT
				throw std::length_error("Supplied memory block capacity limits are either invalid or outside of block_capacity_hard_limits()");
			#else
				std::terminate();
			#endif
		}
	}



	void blank() noexcept
	{
		if constexpr (std::is_standard_layout<hive>::value && std::allocator_traits<allocator_type>::is_always_equal::value && std::is_trivial<group_pointer_type>::value && std::is_trivial<aligned_pointer_type>::value && std::is_trivial<skipfield_pointer_type>::value)
		{ // If all pointer types are trivial, we can just nuke the member variables from orbit with memset (nullptr is always 0):
			std::memset(static_cast<void *>(this), 0, offsetof(hive, min_block_capacity));
		}
		else
		{
			end_iterator.group_pointer = nullptr;
			end_iterator.element_pointer = nullptr;
			end_iterator.skipfield_pointer = nullptr;
			begin_iterator.group_pointer = nullptr;
			begin_iterator.element_pointer = nullptr;
			begin_iterator.skipfield_pointer = nullptr;
			erasure_groups_head = nullptr;
			unused_groups_head = nullptr;
			total_size = 0;
			total_capacity = 0;
		}
	}



public:

	// Default constructors:

	explicit hive(const allocator_type &alloc) noexcept:
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(default_min_block_capacity()),
		max_block_capacity(default_max_block_capacity()),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{}



	constexpr hive() noexcept(noexcept(allocator_type())) :
		hive(allocator_type())
	{}



	hive(const hive_limits block_limits, const allocator_type &alloc):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
	}



	explicit hive(const hive_limits block_limits):
		hive(block_limits, allocator_type())
	{}



	// Copy constructors:

	hive(const hive &source, const std::type_identity_t<allocator_type> &alloc):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>((source.min_block_capacity > source.total_size) ? source.min_block_capacity : ((source.total_size > source.max_block_capacity) ? source.max_block_capacity : source.total_size))), // min block capacity is set to the value closest to the total number of elements in the source hive, in order not to create unnecessarily small groups during the range-insert below; it then reverts to the original minimum afterwards. This effectively saves a call to reserve.
		max_block_capacity(source.max_block_capacity),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{ // can skip checking the supplied capacity limits for conformance here, as source will already have checked its own. The same applies for the other copy and move constructors below
		range_assign(source.begin_iterator, source.total_size);
		min_block_capacity = source.min_block_capacity; // reset to the correct value for future operations
	}



	hive(const hive &source):
		hive(source, std::allocator_traits<allocator_type>::select_on_container_copy_construction(source))
	{}



	// Move constructors:

	hive(hive &&source, const std::type_identity_t<allocator_type> &alloc):
		allocator_type(alloc),
		end_iterator(std::move(source.end_iterator)),
		begin_iterator(std::move(source.begin_iterator)),
		erasure_groups_head(std::move(source.erasure_groups_head)),
		unused_groups_head(std::move(source.unused_groups_head)),
		total_size(source.total_size),
		total_capacity(source.total_capacity),
		min_block_capacity(source.min_block_capacity),
		max_block_capacity(source.max_block_capacity),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		assert(&source != this);
		source.blank();
	}



	hive(hive &&source) noexcept:
		allocator_type(std::move(static_cast<allocator_type &>(source))),
		end_iterator(std::move(source.end_iterator)),
		begin_iterator(std::move(source.begin_iterator)),
		erasure_groups_head(std::move(source.erasure_groups_head)),
		unused_groups_head(std::move(source.unused_groups_head)),
		total_size(source.total_size),
		total_capacity(source.total_capacity),
		min_block_capacity(source.min_block_capacity),
		max_block_capacity(source.max_block_capacity),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		assert(&source != this);
		source.blank();
	}



	// Fill constructors:

	hive(const size_type fill_number, const element_type &element, const hive_limits block_limits, const allocator_type &alloc = allocator_type()):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
		assign(fill_number, element);
	}


	hive(const size_type fill_number, const element_type &element, const allocator_type &alloc = allocator_type()) :
		hive(fill_number, element, default_block_capacity_limits(), alloc)
	{}



	// Default-value fill constructors:

	hive(const size_type fill_number, const hive_limits block_limits, const allocator_type &alloc = allocator_type()):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
		assign(fill_number, element_type());
	}



	hive(const size_type fill_number, const allocator_type &alloc = allocator_type()):
		hive(fill_number, default_block_capacity_limits(), alloc)
	{}



	// Range constructors:

	template<typename iterator_type>
	hive(const typename std::enable_if_t<!std::numeric_limits<iterator_type>::is_integer, iterator_type> &first, const iterator_type &last, const hive_limits block_limits, const allocator_type &alloc = allocator_type()):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
		assign<iterator_type>(first, last);
	}



	template<typename iterator_type>
	hive(const typename std::enable_if_t<!std::numeric_limits<iterator_type>::is_integer, iterator_type> &first, const iterator_type &last, const allocator_type &alloc = allocator_type()):
		hive(first, last, default_block_capacity_limits(), alloc)
	{}



	// Initializer-list constructors:

	hive(const std::initializer_list<element_type> &element_list, const hive_limits block_limits, const allocator_type &alloc = allocator_type()):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
		range_assign(element_list.begin(), static_cast<size_type>(element_list.size()));
	}



	hive(const std::initializer_list<element_type> &element_list, const allocator_type &alloc = allocator_type()):
		hive(element_list, default_block_capacity_limits(), alloc)
	{}



	// Ranges v3 constructors:

	template<class range_type>
		requires std::ranges::range<range_type>
	hive(plf::ranges::from_range_t, range_type &&rg, const hive_limits block_limits, const allocator_type &alloc = allocator_type()):
		allocator_type(alloc),
		erasure_groups_head(nullptr),
		unused_groups_head(nullptr),
		total_size(0),
		total_capacity(0),
		min_block_capacity(static_cast<skipfield_type>(block_limits.min)),
		max_block_capacity(static_cast<skipfield_type>(block_limits.max)),
		group_allocator(*this),
		aligned_struct_allocator(*this),
		skipfield_allocator(*this),
		tuple_allocator(*this)
	{
		check_capacities_conformance(block_limits);
		range_assign(std::ranges::begin(rg), static_cast<size_type>(std::ranges::distance(rg)));
	}



	template<class range_type>
		requires std::ranges::range<range_type>
	hive(plf::ranges::from_range_t, range_type &&rg, const allocator_type &alloc = allocator_type()):
		hive(plf::ranges::from_range, std::move(rg), default_block_capacity_limits(), alloc)
	{}
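
	// Usage sketch (editorial addition): the from_range_t overloads permit construction from
	// anything satisfying std::ranges::range, mirroring the C++23 from_range_t constructors of
	// the standard containers. Assumes <vector>.
	/*
		std::vector<int> v = {1, 2, 3, 4, 5};
		plf::hive<int> h(plf::ranges::from_range, v);
		plf::hive<int> h2(plf::ranges::from_range, v, plf::hive_limits(100, 1000));
	*/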



	// Everything else:

	iterator begin() noexcept
	{
		return begin_iterator;
	}



	const_iterator begin() const noexcept
	{
		return begin_iterator;
	}



	iterator end() noexcept
	{
		return end_iterator;
	}



	const_iterator end() const noexcept
	{
		return end_iterator;
	}



	const_iterator cbegin() const noexcept
	{
		return begin_iterator;
	}



	const_iterator cend() const noexcept
	{
		return end_iterator;
	}



	reverse_iterator rbegin() noexcept
	{
		return (end_iterator.group_pointer != nullptr) ? ++reverse_iterator(end_iterator.group_pointer, end_iterator.element_pointer, end_iterator.skipfield_pointer) : reverse_iterator(begin_iterator.group_pointer, begin_iterator.element_pointer - 1, begin_iterator.skipfield_pointer - 1);
	}



	const_reverse_iterator rbegin() const noexcept
	{
		return crbegin();
	}



	reverse_iterator rend() noexcept
	{
		return reverse_iterator(begin_iterator.group_pointer, begin_iterator.element_pointer - 1, begin_iterator.skipfield_pointer - 1);
	}



	const_reverse_iterator rend() const noexcept
	{
		return crend();
	}



	const_reverse_iterator crbegin() const noexcept
	{
		return (end_iterator.group_pointer != nullptr) ? ++const_reverse_iterator(end_iterator.group_pointer, end_iterator.element_pointer, end_iterator.skipfield_pointer) : const_reverse_iterator(begin_iterator.group_pointer, begin_iterator.element_pointer - 1, begin_iterator.skipfield_pointer - 1);
	}



	const_reverse_iterator crend() const noexcept
	{
		return const_reverse_iterator(begin_iterator.group_pointer, begin_iterator.element_pointer - 1, begin_iterator.skipfield_pointer - 1);
	}



	~hive() noexcept
	{
		destroy_all_data();
	}



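	// Usage sketch (editorial addition): iteration only ever visits occupied slots, skipping
	// erased ones via the skipfield, and reverse iteration is symmetrical. Assumes <iostream>;
	// with no erasures performed, iteration order matches insertion order.
	/*
		plf::hive<int> h = {1, 2, 3};
		for (const int i : h) std::cout << i << ' ';	// prints "1 2 3"
		for (auto rit = h.rbegin(); rit != h.rend(); ++rit) std::cout << *rit << ' ';	// prints "3 2 1"
	*/
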
private:

	group_pointer_type allocate_new_group(const skipfield_type elements_per_group, group_pointer_type const previous = nullptr)
	{
		group_pointer_type const new_group = std::allocator_traits<group_allocator_type>::allocate(group_allocator, 1, previous);

		#ifdef PLF_EXCEPTIONS_SUPPORT
			try
			{
				std::allocator_traits<group_allocator_type>::construct(group_allocator, new_group, aligned_struct_allocator, elements_per_group, previous);
			}
			catch (...)
			{
				std::allocator_traits<group_allocator_type>::deallocate(group_allocator, new_group, 1);
				throw;
			}
		#else
			std::allocator_traits<group_allocator_type>::construct(group_allocator, new_group, aligned_struct_allocator, elements_per_group, previous);
		#endif


		return new_group;
	}



	void deallocate_group(group_pointer_type const the_group) noexcept
	{
		std::allocator_traits<aligned_struct_allocator_type>::deallocate(aligned_struct_allocator, pointer_cast<aligned_struct_pointer_type>(the_group->elements), get_aligned_block_capacity(the_group->capacity));
		std::allocator_traits<group_allocator_type>::deallocate(group_allocator, the_group, 1);
	}



	constexpr void destroy_element(const aligned_pointer_type element) noexcept
	{
		if constexpr (!std::is_trivially_destructible<element_type>::value) // to avoid codegen for trivial types
		{
			std::allocator_traits<allocator_type>::destroy(*this, pointer_cast<pointer>(element));
		}
	}



	void destroy_group(const aligned_pointer_type end_pointer) noexcept
	{
		if constexpr (!std::is_trivially_destructible<element_type>::value)
		{
			do
			{
				destroy_element(begin_iterator.element_pointer);
				begin_iterator.element_pointer += static_cast<size_type>(*++begin_iterator.skipfield_pointer) + 1u;
				begin_iterator.skipfield_pointer += *begin_iterator.skipfield_pointer;
			} while(begin_iterator.element_pointer != end_pointer);
		}

		deallocate_group(begin_iterator.group_pointer);
	}



	void destroy_all_data() noexcept
	{
		if (begin_iterator.group_pointer != nullptr)
		{
			end_iterator.group_pointer->next_group = unused_groups_head; // Link used and unused_group lists together

			if constexpr (!std::is_trivially_destructible<element_type>::value)
			{
				if (total_size != 0)
				{
					while (begin_iterator.group_pointer != end_iterator.group_pointer) // Erase elements without bothering to update skipfield - much faster:
					{
						const group_pointer_type next_group = begin_iterator.group_pointer->next_group;
						destroy_group(pointer_cast<aligned_pointer_type>(begin_iterator.group_pointer->skipfield));
						begin_iterator.group_pointer = next_group;
						begin_iterator.element_pointer = next_group->elements + *(next_group->skipfield);
						begin_iterator.skipfield_pointer = next_group->skipfield + *(next_group->skipfield);
					}

					destroy_group(end_iterator.element_pointer);
					begin_iterator.group_pointer = unused_groups_head;
				}
			}

			while (begin_iterator.group_pointer != nullptr)
			{
				const group_pointer_type next_group = begin_iterator.group_pointer->next_group;
				deallocate_group(begin_iterator.group_pointer);
				begin_iterator.group_pointer = next_group;
			}
		}
	}



	void initialize(const skipfield_type first_group_size)
	{
		end_iterator.group_pointer = begin_iterator.group_pointer = allocate_new_group(first_group_size);
		end_iterator.element_pointer = begin_iterator.element_pointer = begin_iterator.group_pointer->elements;
		end_iterator.skipfield_pointer = begin_iterator.skipfield_pointer = begin_iterator.group_pointer->skipfield;
		total_capacity = first_group_size;
	}



	void edit_free_list(skipfield_pointer_type const location, const skipfield_type value) noexcept
	{
		std::allocator_traits<skipfield_allocator_type>::destroy(skipfield_allocator, location);
		std::allocator_traits<skipfield_allocator_type>::construct(skipfield_allocator, location, value);
	}



	void edit_free_list_prev(aligned_pointer_type const location, const skipfield_type value) noexcept // Write to the 'previous erased element' index in the erased element memory location
	{
		edit_free_list(pointer_cast<skipfield_pointer_type>(location), value);
	}



	void edit_free_list_next(aligned_pointer_type const location, const skipfield_type value) noexcept // Ditto 'next'
	{
		edit_free_list(pointer_cast<skipfield_pointer_type>(location) + 1, value);
	}



	void edit_free_list_head(aligned_pointer_type const location, const skipfield_type value) noexcept
	{
		skipfield_pointer_type const converted_location = pointer_cast<skipfield_pointer_type>(location);
		edit_free_list(converted_location, value);
		edit_free_list(converted_location + 1, std::numeric_limits<skipfield_type>::max());
	}



	void update_skipblock(const iterator &new_location, const skipfield_type prev_free_list_index) noexcept
	{
		const skipfield_type new_value = static_cast<skipfield_type>(*(new_location.skipfield_pointer) - 1);

		if (new_value != 0) // ie. skipfield was not 1, ie. a single-node skipblock, with no additional nodes to update
		{
			// set (new) start and (original) end of skipblock to new value:
			*(new_location.skipfield_pointer + new_value) = *(new_location.skipfield_pointer + 1) = new_value;

			// transfer free list node to new start node:
			++(erasure_groups_head->free_list_head);

			if (prev_free_list_index != std::numeric_limits<skipfield_type>::max()) // ie. not the tail free list node
			{
				edit_free_list_next(new_location.group_pointer->elements + prev_free_list_index, erasure_groups_head->free_list_head);
			}

			edit_free_list_head(new_location.element_pointer + 1, prev_free_list_index);
		}
		else // single-node skipblock, remove skipblock
		{
			erasure_groups_head->free_list_head = prev_free_list_index;

			if (prev_free_list_index != std::numeric_limits<skipfield_type>::max()) // ie. not the last free list node
			{
				edit_free_list_next(new_location.group_pointer->elements + prev_free_list_index, std::numeric_limits<skipfield_type>::max());
			}
			else // remove this group from the list of groups with erasures
			{
				erasure_groups_head = erasure_groups_head->erasures_list_next_group; // No need to update previous group for new head, as this is never accessed if group == head
			}
		}

		*(new_location.skipfield_pointer) = 0;
		++(new_location.group_pointer->size);

		if (new_location.group_pointer == begin_iterator.group_pointer && new_location.element_pointer < begin_iterator.element_pointer)
		{ /* ie. begin_iterator was moved forwards as the result of an erasure at some point, this erased element is before the current begin, hence, set current begin iterator to this element */
			begin_iterator = new_location;
		}

		++total_size;
	}

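	// Sketch of the per-group free list (editorial addition, inferred from the edit_free_list_*
	// helpers and update_skipblock above): each group with erasures stores free_list_head, the
	// index of its most recently created skipblock. The otherwise-unused memory of each
	// skipblock's first element holds two skipfield_type indexes - the previous and next
	// skipblocks in that group's free list, with numeric_limits<skipfield_type>::max() meaning
	// 'none'. Reinsertion therefore pops the head skipblock in O(1): update_skipblock shrinks it
	// by one element, re-links the list, and removes the group from the erasure-groups list once
	// its final skipblock is consumed.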


	void reset() noexcept
	{
		destroy_all_data();
		blank();
	}



	void update_subsequent_group_numbers(size_type current_group_number, group_pointer_type update_group) noexcept
	{
		do
		{
			update_group->group_number = current_group_number++;
			update_group = update_group->next_group;
		} while (update_group != nullptr);
	}



	void reset_group_numbers() noexcept
	{
		update_subsequent_group_numbers(0, begin_iterator.group_pointer);
	}



	void reset_group_numbers_if_necessary() noexcept
	{
		if (end_iterator.group_pointer->group_number == std::numeric_limits<size_type>::max())
		{
			reset_group_numbers();
		}
	}



	group_pointer_type reuse_unused_group() noexcept
	{
		group_pointer_type const next_group = unused_groups_head;
		unused_groups_head = next_group->next_group;
		reset_group_numbers_if_necessary();
		next_group->reset(1, nullptr, end_iterator.group_pointer, end_iterator.group_pointer->group_number + 1u);
		return next_group;
	}



public:


	iterator insert(const element_type &element)
	{
		if (end_iterator.element_pointer != nullptr)
		{
			if (erasure_groups_head == nullptr) // ie. there are no erased elements
			{
				if (end_iterator.element_pointer != pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield)) // end_iterator is not at end of block
				{
					const iterator return_iterator = end_iterator; // Make copy for return before modifying end_iterator

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_copy_constructible<element_type>::value)
						{
							std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), element);
							++end_iterator.element_pointer; // Shift the addition outside, avoiding a try-catch block if an exception is thrown during construction
						}
						else
					#endif
					{ // For no good reason this compiles to much faster code under GCC in raw small struct tests - don't need to avoid try-catch here, so can keep the ++ in the first line:
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), element);
					}

					++(end_iterator.group_pointer->size);
					++end_iterator.skipfield_pointer;
					++total_size;

					return return_iterator; // return value before incrementation
				}

				group_pointer_type next_group;

				if (unused_groups_head == nullptr)
				{
					const skipfield_type new_group_size = (total_size < static_cast<size_type>(max_block_capacity)) ? static_cast<skipfield_type>(total_size) : max_block_capacity;
					reset_group_numbers_if_necessary();
					next_group = allocate_new_group(new_group_size, end_iterator.group_pointer);

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_copy_constructible<element_type>::value)
						{
							try
							{
								std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), element);
							}
							catch (...)
							{
								deallocate_group(next_group);
								throw;
							}
						}
						else
					#endif
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), element);
					}

					total_capacity += new_group_size;
				}
				else
				{
					std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(unused_groups_head->elements), element);
					next_group = reuse_unused_group();
				}

				end_iterator.group_pointer->next_group = next_group;
				end_iterator.group_pointer = next_group;
				end_iterator.element_pointer = next_group->elements + 1;
				end_iterator.skipfield_pointer = next_group->skipfield + 1;
				++total_size;

				return iterator(next_group, next_group->elements, next_group->skipfield); /* returns value before incrementation */
			}
			else // there are erased elements, reuse those memory locations
			{
				iterator new_location(erasure_groups_head, erasure_groups_head->elements + erasure_groups_head->free_list_head, erasure_groups_head->skipfield + erasure_groups_head->free_list_head);

				// We always reuse the element at the start of the skipblock; this is also where the free-list information for that skipblock is stored. Get the previous free-list node's index from this memory space before we write our element to it. The 'next' index is always the free_list_head (as represented by the maximum value of the skipfield type) here, so we don't need to get it:
				const skipfield_type prev_free_list_index = *pointer_cast<skipfield_pointer_type>(new_location.element_pointer);
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(new_location.element_pointer), element);
				update_skipblock(new_location, prev_free_list_index);

				return new_location;
			}
		}
		else // ie. newly-constructed hive, no insertions yet and no groups
		{
			initialize(min_block_capacity);

			#ifdef PLF_EXCEPTIONS_SUPPORT
				if constexpr (!std::is_nothrow_copy_constructible<element_type>::value)
				{
					try
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), element);
					}
					catch (...)
					{
						reset();
						throw;
					}
				}
				else
			#endif
			{
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), element);
			}

			++end_iterator.skipfield_pointer;
			total_size = 1;
			return begin_iterator;
		}
	}


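	// Usage sketch (editorial addition): insert() never relocates existing elements, so iterators
	// and pointers to hive elements are only invalidated by erasure of the element they refer to.
	// Erased-element slots are reused before new blocks are allocated.
	/*
		plf::hive<int> h;
		const auto it = h.insert(1);	// the first insert allocates the initial block
		int *const p = &*it;
		h.insert(2000, 3);				// fill-insert of 2000 copies of 3 - *p still safely == 1
	*/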

	iterator insert(element_type &&element) // The move-insert function is near-identical to the regular insert function, with the exception of the element construction method and is_nothrow tests.
	{
		if (end_iterator.element_pointer != nullptr)
		{
			if (erasure_groups_head == nullptr)
			{
				if (end_iterator.element_pointer != pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield))
				{
					const iterator return_iterator = end_iterator;

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_move_constructible<element_type>::value)
						{
							std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), std::move(element));
							++end_iterator.element_pointer;
						}
						else
					#endif
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::move(element));
					}

					++(end_iterator.group_pointer->size);
					++end_iterator.skipfield_pointer;
					++total_size;

					return return_iterator;
				}

				group_pointer_type next_group;

				if (unused_groups_head == nullptr)
				{
					const skipfield_type new_group_size = (total_size < static_cast<size_type>(max_block_capacity)) ? static_cast<skipfield_type>(total_size) : max_block_capacity;
					reset_group_numbers_if_necessary();
					next_group = allocate_new_group(new_group_size, end_iterator.group_pointer);

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_move_constructible<element_type>::value)
						{
							try
							{
								std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), std::move(element));
							}
							catch (...)
							{
								deallocate_group(next_group);
								throw;
							}
						}
						else
					#endif
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), std::move(element));
					}

					total_capacity += new_group_size;
				}
				else
				{
					std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(unused_groups_head->elements), std::move(element));
					next_group = reuse_unused_group();
				}

				end_iterator.group_pointer->next_group = next_group;
				end_iterator.group_pointer = next_group;
				end_iterator.element_pointer = next_group->elements + 1;
				end_iterator.skipfield_pointer = next_group->skipfield + 1;
				++total_size;

				return iterator(next_group, next_group->elements, next_group->skipfield); /* returns value before incrementation */
			}
			else
			{
				iterator new_location(erasure_groups_head, erasure_groups_head->elements + erasure_groups_head->free_list_head, erasure_groups_head->skipfield + erasure_groups_head->free_list_head);

				const skipfield_type prev_free_list_index = *pointer_cast<skipfield_pointer_type>(new_location.element_pointer);
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(new_location.element_pointer), std::move(element));
				update_skipblock(new_location, prev_free_list_index);

				return new_location;
			}
		}
		else
		{
			initialize(min_block_capacity);

			#ifdef PLF_EXCEPTIONS_SUPPORT
				if constexpr (!std::is_nothrow_move_constructible<element_type>::value)
				{
					try
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::move(element));
					}
					catch (...)
					{
						reset();
						throw;
					}
				}
				else
			#endif
			{
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::move(element));
			}

			++end_iterator.skipfield_pointer;
			total_size = 1;
			return begin_iterator;
		}
	}



	template<typename... arguments>
	iterator emplace(arguments &&... parameters) // The emplace function is near-identical to the regular insert function, with the exception of the element construction method, and change to is_nothrow tests.
	{
		if (end_iterator.element_pointer != nullptr)
		{
			if (erasure_groups_head == nullptr)
			{
				if (end_iterator.element_pointer != pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield))
				{
					const iterator return_iterator = end_iterator;

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_constructible<element_type>::value)
						{
							std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), std::forward<arguments>(parameters) ...);
							++end_iterator.element_pointer;
						}
						else
					#endif
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::forward<arguments>(parameters) ...);
					}

					++(end_iterator.group_pointer->size);
					++end_iterator.skipfield_pointer;
					++total_size;

					return return_iterator;
				}

				group_pointer_type next_group;

				if (unused_groups_head == nullptr)
				{
					const skipfield_type new_group_size = (total_size < static_cast<size_type>(max_block_capacity)) ? static_cast<skipfield_type>(total_size) : max_block_capacity;
					reset_group_numbers_if_necessary();
					next_group = allocate_new_group(new_group_size, end_iterator.group_pointer);

					#ifdef PLF_EXCEPTIONS_SUPPORT
						if constexpr (!std::is_nothrow_constructible<element_type>::value)
						{
							try
							{
								std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), std::forward<arguments>(parameters) ...);
							}
							catch (...)
							{
								deallocate_group(next_group);
								throw;
							}
						}
						else
					#endif
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(next_group->elements), std::forward<arguments>(parameters) ...);
					}

					total_capacity += new_group_size;
				}
				else
				{
					std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(unused_groups_head->elements), std::forward<arguments>(parameters) ...);
					next_group = reuse_unused_group();
				}

				end_iterator.group_pointer->next_group = next_group;
				end_iterator.group_pointer = next_group;
				end_iterator.element_pointer = next_group->elements + 1;
				end_iterator.skipfield_pointer = next_group->skipfield + 1;
				++total_size;

				return iterator(next_group, next_group->elements, next_group->skipfield); /* returns value before incrementation */
			}
			else
			{
				iterator new_location(erasure_groups_head, erasure_groups_head->elements + erasure_groups_head->free_list_head, erasure_groups_head->skipfield + erasure_groups_head->free_list_head);

				const skipfield_type prev_free_list_index = *pointer_cast<skipfield_pointer_type>(new_location.element_pointer);
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(new_location.element_pointer), std::forward<arguments>(parameters) ...);
				update_skipblock(new_location, prev_free_list_index);

				return new_location;
			}
		}
		else
		{
			initialize(min_block_capacity);

			#ifdef PLF_EXCEPTIONS_SUPPORT
				if constexpr (!std::is_nothrow_constructible<element_type>::value)
				{
					try
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::forward<arguments>(parameters) ...);
					}
					catch (...)
					{
						reset();
						throw;
					}
				}
				else
			#endif
			{
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer++), std::forward<arguments>(parameters) ...);
			}

			++end_iterator.skipfield_pointer;
			total_size = 1;
			return begin_iterator;
		}
	}


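	// Usage sketch (editorial addition): emplace constructs the element in place from constructor
	// arguments, following the same three code paths as insert (space in the current block, a new
	// or reused block, or a reusable skipblock). Assumes <string> and <utility>.
	/*
		plf::hive<std::pair<int, std::string>> h;
		const auto it = h.emplace(1, "first");	// constructs the pair directly inside the hive
	*/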

private:

	// For catch blocks in fill() and range_fill()
	void recover_from_partial_fill()
	{
		#ifdef PLF_EXCEPTIONS_SUPPORT
			if constexpr ((!std::is_copy_constructible<element_type>::value && !std::is_nothrow_move_constructible<element_type>::value) || !std::is_nothrow_copy_constructible<element_type>::value) // to avoid unnecessary codegen, since this function will never be called if this line isn't true
			{
				const skipfield_type elements_constructed_before_exception = static_cast<skipfield_type>(end_iterator.element_pointer - end_iterator.group_pointer->elements);
				end_iterator.group_pointer->size = elements_constructed_before_exception;
				end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + elements_constructed_before_exception;
				total_size += elements_constructed_before_exception;
				unused_groups_head = end_iterator.group_pointer->next_group;
				end_iterator.group_pointer->next_group = nullptr;
			}
		#endif
	}



	void fill(const element_type &element, const skipfield_type size)
	{
		#ifdef PLF_EXCEPTIONS_SUPPORT
			if constexpr (!std::is_nothrow_copy_constructible<element_type>::value)
			{
				const aligned_pointer_type fill_end = end_iterator.element_pointer + size;

				do
				{
					try
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), element);
					}
					catch (...)
					{
						recover_from_partial_fill();
						throw;
					}
				} while (++end_iterator.element_pointer != fill_end);
			}
			else
		#endif
		if constexpr (std::is_trivially_copyable<element_type>::value && std::is_trivially_copy_constructible<element_type>::value) // ie. we can get away with using the cheaper fill_n here if there is no chance of an exception being thrown:
		{
			if constexpr (sizeof(aligned_element_struct) != sizeof(element_type))
			{
				alignas (alignof(aligned_element_struct)) element_type aligned_copy = element; // to avoid potentially violating memory boundaries in line below, create an initial object copy of same (but aligned) type
				std::fill_n(end_iterator.element_pointer, size, *pointer_cast<aligned_pointer_type>(&aligned_copy));
			}
			else
			{
				std::fill_n(pointer_cast<pointer>(end_iterator.element_pointer), size, element);
			}

			end_iterator.element_pointer += size;
		}
		else // If at least nothrow_constructible (or exceptions disabled), can remove the large block of 'catch' code above
		{
			const aligned_pointer_type fill_end = end_iterator.element_pointer + size;

			do
			{
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), element);
			} while (++end_iterator.element_pointer != fill_end);
		}

		total_size += size;
	}



	// For catch blocks in range_fill_skipblock and fill_skipblock
	void recover_from_partial_skipblock_fill(aligned_pointer_type const location, const aligned_pointer_type current_location, skipfield_pointer_type const skipfield_pointer, const skipfield_type prev_free_list_node)
	{
		#ifdef PLF_EXCEPTIONS_SUPPORT
			if constexpr ((!std::is_copy_constructible<element_type>::value && !std::is_nothrow_move_constructible<element_type>::value) || !std::is_nothrow_copy_constructible<element_type>::value) // to avoid unnecessary codegen
			{
				// Reconstruct existing skipblock and free-list indexes to reflect partially-reused skipblock:
				const skipfield_type elements_constructed_before_exception = static_cast<skipfield_type>(current_location - location);
				erasure_groups_head->size = static_cast<skipfield_type>(erasure_groups_head->size + elements_constructed_before_exception);
				total_size += elements_constructed_before_exception;

				std::memset(skipfield_pointer, 0, elements_constructed_before_exception * sizeof(skipfield_type));

				edit_free_list_head(location + elements_constructed_before_exception, prev_free_list_node);

				const skipfield_type new_skipblock_head_index = static_cast<skipfield_type>((location - erasure_groups_head->elements) + elements_constructed_before_exception);
				erasure_groups_head->free_list_head = new_skipblock_head_index;

				if (prev_free_list_node != std::numeric_limits<skipfield_type>::max())
				{
					edit_free_list_next(erasure_groups_head->elements + prev_free_list_node, new_skipblock_head_index);
				}
			}
		#endif
	}



	void fill_skipblock(const element_type &element, aligned_pointer_type const location, skipfield_pointer_type const skipfield_pointer, const skipfield_type size)
	{
		#ifdef PLF_EXCEPTIONS_SUPPORT
			if constexpr (!std::is_nothrow_copy_constructible<element_type>::value)
			{
				const aligned_pointer_type fill_end = location + size;
				const skipfield_type prev_free_list_node = *pointer_cast<skipfield_pointer_type>(location); // in case of exception, grabbing indexes before free_list node is reused

				for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location)
				{
					try
					{
						std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), element);
					}
					catch (...)
					{
						recover_from_partial_skipblock_fill(location, current_location, skipfield_pointer, prev_free_list_node);
						throw;
					}
				}
			}
			else
		#endif
		if constexpr (std::is_trivially_copyable<element_type>::value && std::is_trivially_copy_constructible<element_type>::value)
		{
			if constexpr (sizeof(aligned_element_struct) != sizeof(element_type))
			{
				alignas (alignof(aligned_element_struct)) element_type aligned_copy = element;
				std::fill_n(location, size, *pointer_cast<aligned_pointer_type>(&aligned_copy));
			}
			else
			{
				std::fill_n(pointer_cast<pointer>(location), size, element);
			}
		}
		else
		{
			const aligned_pointer_type fill_end = location + size;

			for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location)
			{
				std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), element);
			}
		}

		std::memset(skipfield_pointer, 0, size * sizeof(skipfield_type)); // reset skipfield nodes within skipblock to 0
		erasure_groups_head->size = static_cast<skipfield_type>(erasure_groups_head->size + size);
		total_size += size;
	}



	void fill_unused_groups(size_type size, const element_type &element, size_type group_number, group_pointer_type previous_group, const group_pointer_type current_group)
	{
		for (end_iterator.group_pointer = current_group; end_iterator.group_pointer->capacity < size; end_iterator.group_pointer = end_iterator.group_pointer->next_group)
		{
			const skipfield_type capacity = end_iterator.group_pointer->capacity;
			end_iterator.group_pointer->reset(capacity, end_iterator.group_pointer->next_group, previous_group, group_number++);
			previous_group = end_iterator.group_pointer;
			size -= static_cast<size_type>(capacity);
			end_iterator.element_pointer = end_iterator.group_pointer->elements;
			fill(element, capacity);
		}

		// Deal with final group (partial fill)
		unused_groups_head = end_iterator.group_pointer->next_group;
		end_iterator.group_pointer->reset(static_cast<skipfield_type>(size), nullptr, previous_group, group_number);
		end_iterator.element_pointer = end_iterator.group_pointer->elements;
		end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + size;
		fill(element, static_cast<skipfield_type>(size));
	}



public:

	// Fill insert

	void insert(size_type size, const element_type &element)
	{
		if (size == 0)
		{
			return;
		}
		else if (size == 1)
		{
			insert(element);
			return;
		}

		if (total_size == 0)
		{
			assign(size, element);
			return;
		}

		reserve(total_size + size);

		// Use up erased locations if available:
		while(erasure_groups_head != nullptr) // skipblock loop: breaks when hive is exhausted of reusable skipblocks, or returns if size == 0
		{
			aligned_pointer_type const element_pointer = erasure_groups_head->elements + erasure_groups_head->free_list_head;
			skipfield_pointer_type const skipfield_pointer = erasure_groups_head->skipfield + erasure_groups_head->free_list_head;
			const skipfield_type skipblock_size = *skipfield_pointer;

			if (erasure_groups_head == begin_iterator.group_pointer && element_pointer < begin_iterator.element_pointer)
			{
				begin_iterator.element_pointer = element_pointer;
				begin_iterator.skipfield_pointer = skipfield_pointer;
			}

			if (skipblock_size <= size)
			{
				erasure_groups_head->free_list_head = *pointer_cast<skipfield_pointer_type>(element_pointer); // set free list head to previous free list node
				fill_skipblock(element, element_pointer, skipfield_pointer, skipblock_size);
				size -= skipblock_size;

				if (erasure_groups_head->free_list_head != std::numeric_limits<skipfield_type>::max()) // ie. there are more skipblocks to be filled in this group
				{
					edit_free_list_next(erasure_groups_head->elements + erasure_groups_head->free_list_head, std::numeric_limits<skipfield_type>::max()); // set 'next' index of new free list head to 'end' (numeric max)
				}
				else
				{
					erasure_groups_head = erasure_groups_head->erasures_list_next_group; // change groups
				}

				if (size == 0)
				{
					return;
				}
			}
			else // skipblock is larger than remaining number of elements
			{
				const skipfield_type prev_index = *pointer_cast<skipfield_pointer_type>(element_pointer); // save before element location is overwritten
				fill_skipblock(element, element_pointer, skipfield_pointer, static_cast<skipfield_type>(size));
				const skipfield_type new_skipblock_size = static_cast<skipfield_type>(skipblock_size - size);

				// Update skipfield (earlier nodes already memset'd in fill_skipblock function):
				*(skipfield_pointer + size) = new_skipblock_size;
				*(skipfield_pointer + skipblock_size - 1) = new_skipblock_size;
				erasure_groups_head->free_list_head = static_cast<skipfield_type>(erasure_groups_head->free_list_head + size); // set free list head to new start node

				// Update free list with new head:
				edit_free_list_head(element_pointer + size, prev_index);

				if (prev_index != std::numeric_limits<skipfield_type>::max())
				{
					edit_free_list_next(erasure_groups_head->elements + prev_index, erasure_groups_head->free_list_head); // set 'next' index of previous skipblock to new start of skipblock
				}

				return;
			}
		}


		// Use up remaining available element locations in end group:
		// This variable is either the remaining capacity of the group or the number of elements yet to be filled, whichever is smaller:
		const skipfield_type group_remainder = static_cast<skipfield_type>(
			(static_cast<size_type>(pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer) >= size) ?
			size : pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer);

		if (group_remainder != 0)
		{
			fill(element, group_remainder);
			end_iterator.group_pointer->size = static_cast<skipfield_type>(end_iterator.group_pointer->size + group_remainder);
1546
1547 if (size == group_remainder) // Ie. remaining capacity was >= remaining elements to be filled
1548 {
1549 end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->size;
1550 return;
1551 }
1552
1553 size -= group_remainder;
1554 }
1555
1556
1557 // Use unused groups:
1558 end_iterator.group_pointer->next_group = unused_groups_head;
1559
1560 if ((std::numeric_limits<size_type>::max() - end_iterator.group_pointer->group_number) < size)
1561 {
1562 reset_group_numbers();
1563 }
1564
1565 fill_unused_groups(size, element, end_iterator.group_pointer->group_number + 1u, end_iterator.group_pointer, unused_groups_head);
1566 }
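 // A minimal usage sketch for the fill insert above (hypothetical example, not part of the original source):
 //
 // plf::hive<int> h;
 // h.insert(5, 42); // inserts five elements with the value 42 at unspecified locations
 //
 // Note there is no position parameter, as hive is an unordered container - reusable erased-element
 // locations and the end of the final group are filled as available.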
1567
1568
1569
1570private:
1571
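 // The following three functions are the range equivalents of fill/fill_skipblock/fill_unused_groups above,
 // constructing elements from a dereferenced, incremented iterator rather than from a single const element.
 // They are used by the range-insert and range-assign functions: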
1572 template <class iterator_type>
1573 iterator_type range_fill(iterator_type it, const skipfield_type size)
1574 {
1575 const aligned_pointer_type fill_end = end_iterator.element_pointer + size;
1576
1577 #ifdef PLF_EXCEPTIONS_SUPPORT
1578 if constexpr ((!std::is_copy_constructible<element_type>::value && !std::is_nothrow_move_constructible<element_type>::value) || !std::is_nothrow_copy_constructible<element_type>::value)
1579 {
1580 do
1581 {
1582 try
1583 {
1584 if constexpr (std::is_copy_constructible<element_type>::value)
1585 {
1586 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), std::move(*it++));
1587 }
1588 else
1589 {
1590 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), *it++);
1591 }
1592 }
1593 catch (...)
1594 {
1595 recover_from_partial_fill();
1596 throw;
1597 }
1598 } while (++end_iterator.element_pointer != fill_end);
1599 }
1600 else
1601 #endif
1602 if constexpr (std::is_copy_constructible<element_type>::value)
1603 {
1604 do
1605 {
1606 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), *it++);
1607 } while (++end_iterator.element_pointer != fill_end);
1608 }
1609 else // assumes moveable-but-not-copyable type
1610 {
1611 do
1612 {
1613 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(end_iterator.element_pointer), std::move(*it++));
1614 } while (++end_iterator.element_pointer != fill_end);
1615 }
1616
1617 total_size += size;
1618 return it;
1619 }
1620
1621
1622
1623 template <class iterator_type>
1624 iterator_type range_fill_skipblock(iterator_type it, aligned_pointer_type const location, skipfield_pointer_type const skipfield_pointer, const skipfield_type size)
1625 {
1626 const aligned_pointer_type fill_end = location + size;
1627
1628 #ifdef PLF_EXCEPTIONS_SUPPORT
1629 if constexpr ((!std::is_copy_constructible<element_type>::value && !std::is_nothrow_move_constructible<element_type>::value) || !std::is_nothrow_copy_constructible<element_type>::value)
1630 {
1631 const skipfield_type prev_free_list_node = *pointer_cast<skipfield_pointer_type>(location); // in case of exception, grabbing indexes before free_list node is reused
1632
1633 for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location)
1634 {
1635 try
1636 {
1637 if constexpr (std::is_copy_constructible<element_type>::value)
1638 {
1639 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), std::move(*it++));
1640 }
1641 else
1642 {
1643 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), *it++);
1644 }
1645 }
1646 catch (...)
1647 {
1648 recover_from_partial_skipblock_fill(location, current_location, skipfield_pointer, prev_free_list_node);
1649 throw;
1650 }
1651 }
1652 }
1653 else
1654 #endif
1655 if constexpr (std::is_copy_constructible<element_type>::value)
1656 {
1657 for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location)
1658 {
1659 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), *it++);
1660 }
1661 }
1662 else // assumes moveable-but-not-copyable type
1663 {
1664 for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location)
1665 {
1666 std::allocator_traits<allocator_type>::construct(*this, pointer_cast<pointer>(current_location), std::move(*it++));
1667 }
1668 }
1669
1670 std::memset(skipfield_pointer, 0, size * sizeof(skipfield_type)); // reset skipfield nodes within skipblock to 0
1671 erasure_groups_head->size = static_cast<skipfield_type>(erasure_groups_head->size + size);
1672 total_size += size;
1673
1674 return it;
1675 }
1676
1677
1678
1679 template <class iterator_type>
1680 void range_fill_unused_groups(size_type size, iterator_type it, size_type group_number, group_pointer_type previous_group, const group_pointer_type current_group)
1681 {
1682 for (end_iterator.group_pointer = current_group; end_iterator.group_pointer->capacity < size; end_iterator.group_pointer = end_iterator.group_pointer->next_group)
1683 {
1684 const skipfield_type capacity = end_iterator.group_pointer->capacity;
1685 end_iterator.group_pointer->reset(capacity, end_iterator.group_pointer->next_group, previous_group, group_number++);
1686 previous_group = end_iterator.group_pointer;
1687 size -= static_cast<size_type>(capacity);
1688 end_iterator.element_pointer = end_iterator.group_pointer->elements;
1689 it = range_fill(it, capacity);
1690 }
1691
1692 // Deal with final group (partial fill)
1693 unused_groups_head = end_iterator.group_pointer->next_group;
1694 end_iterator.group_pointer->reset(static_cast<skipfield_type>(size), nullptr, previous_group, group_number);
1695 end_iterator.element_pointer = end_iterator.group_pointer->elements;
1696 end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + size;
1697 range_fill(it, static_cast<skipfield_type>(size));
1698 }
1699
1700
1701
1702 template <class iterator_type>
1703 void range_insert (iterator_type it, size_type size) // near-identical to the fill insert, except that an iterator is dereferenced and incremented for construction instead of a const element being used, and the fill/fill_skipblock/fill_unused_groups calls are replaced with their range_fill equivalents. See the fill insert for code explanations
1704 {
1705 if (size == 0)
1706 {
1707 return;
1708 }
1709 else if (size == 1)
1710 {
1711 insert(*it);
1712 return;
1713 }
1714
1715 if (total_size == 0)
1716 {
1717 range_assign(it, size);
1718 return;
1719 }
1720
1721 reserve(total_size + size);
1722
1723 while(erasure_groups_head != nullptr)
1724 {
1725 aligned_pointer_type const element_pointer = erasure_groups_head->elements + erasure_groups_head->free_list_head;
1726 skipfield_pointer_type const skipfield_pointer = erasure_groups_head->skipfield + erasure_groups_head->free_list_head;
1727 const skipfield_type skipblock_size = *skipfield_pointer;
1728
1729 if (erasure_groups_head == begin_iterator.group_pointer && element_pointer < begin_iterator.element_pointer)
1730 {
1731 begin_iterator.element_pointer = element_pointer;
1732 begin_iterator.skipfield_pointer = skipfield_pointer;
1733 }
1734
1735 if (skipblock_size <= size)
1736 {
1737 erasure_groups_head->free_list_head = *pointer_cast<skipfield_pointer_type>(element_pointer);
1738 it = range_fill_skipblock(it, element_pointer, skipfield_pointer, skipblock_size);
1739 size -= skipblock_size;
1740
1741 if (erasure_groups_head->free_list_head != std::numeric_limits<skipfield_type>::max())
1742 {
1743 edit_free_list_next(erasure_groups_head->elements + erasure_groups_head->free_list_head, std::numeric_limits<skipfield_type>::max());
1744 }
1745 else
1746 {
1747 erasure_groups_head = erasure_groups_head->erasures_list_next_group;
1748 }
1749
1750 if (size == 0)
1751 {
1752 return;
1753 }
1754 }
1755 else
1756 {
1757 const skipfield_type prev_index = *pointer_cast<skipfield_pointer_type>(element_pointer);
1758 it = range_fill_skipblock(it, element_pointer, skipfield_pointer, static_cast<skipfield_type>(size));
1759 const skipfield_type new_skipblock_size = static_cast<skipfield_type>(skipblock_size - size);
1760
1761 *(skipfield_pointer + size) = new_skipblock_size;
1762 *(skipfield_pointer + skipblock_size - 1) = new_skipblock_size;
1763 erasure_groups_head->free_list_head = static_cast<skipfield_type>(erasure_groups_head->free_list_head + size);
1764 edit_free_list_head(element_pointer + size, prev_index);
1765
1766 if (prev_index != std::numeric_limits<skipfield_type>::max())
1767 {
1768 edit_free_list_next(erasure_groups_head->elements + prev_index, erasure_groups_head->free_list_head);
1769 }
1770
1771 return;
1772 }
1773 }
1774
1775 const skipfield_type group_remainder = static_cast<skipfield_type>(
1776 (static_cast<size_type>(pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer) >= size) ?
1777 size : pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer);
1778
1779 if (group_remainder != 0)
1780 {
1781 it = range_fill(it, group_remainder);
1782 end_iterator.group_pointer->size = static_cast<skipfield_type>(end_iterator.group_pointer->size + group_remainder);
1783
1784 if (size == group_remainder)
1785 {
1786 end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->size;
1787 return;
1788 }
1789
1790 size -= group_remainder;
1791 }
1792
1793
1794 end_iterator.group_pointer->next_group = unused_groups_head;
1795
1796 if ((std::numeric_limits<size_type>::max() - end_iterator.group_pointer->group_number) < size)
1797 {
1798 reset_group_numbers();
1799 }
1800
1801 range_fill_unused_groups(size, it, end_iterator.group_pointer->group_number + 1u, end_iterator.group_pointer, unused_groups_head);
1802 }
1803
1804
1805
1806public:
1807
1808 // Range insert:
1809
1810 template <class iterator_type>
1811 void insert (const typename std::enable_if_t<!std::numeric_limits<iterator_type>::is_integer, iterator_type> first, const iterator_type last)
1812 {
1813 range_insert(first, static_cast<size_type>(std::distance(first, last)));
1814 }
1815
1816
1817
1818 // Range insert, move_iterator overload:
1819
1820 template <class iterator_type>
1821 void insert (const std::move_iterator<iterator_type> first, const std::move_iterator<iterator_type> last)
1822 {
1823 range_insert(first, static_cast<size_type>(std::distance(first.base(), last.base())));
1824 }
1825
1826
1827
1828 // Initializer-list insert:
1829
1830 void insert (const std::initializer_list<element_type> &element_list)
1831 {
1832 range_insert(element_list.begin(), static_cast<size_type>(element_list.size()));
1833 }
1834
1835
1836
1837 template<class range_type>
1838 requires std::ranges::range<range_type>
1839 void insert_range(range_type &&the_range)
1840 {
1841 range_insert(std::ranges::begin(the_range), static_cast<size_type>(std::ranges::distance(the_range)));
1842 }
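 // A usage sketch for the insert overloads above (hypothetical example, not part of the original source):
 //
 // std::vector<int> v = {1, 2, 3};
 // plf::hive<int> h;
 // h.insert(v.begin(), v.end()); // range insert
 // h.insert({4, 5, 6}); // initializer-list insert
 // h.insert_range(v); // ranges overload, equivalent to the range insert above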
1843
1844
1845
1846private:
1847
1848 void remove_from_groups_with_erasures_list(const group_pointer_type group_to_remove) noexcept
1849 {
1850 if (group_to_remove != erasure_groups_head)
1851 {
1852 group_to_remove->erasures_list_previous_group->erasures_list_next_group = group_to_remove->erasures_list_next_group;
1853
1854 if (group_to_remove->erasures_list_next_group != nullptr)
1855 {
1856 group_to_remove->erasures_list_next_group->erasures_list_previous_group = group_to_remove->erasures_list_previous_group;
1857 }
1858 }
1859 else
1860 {
1861 erasure_groups_head = erasure_groups_head->erasures_list_next_group;
1862 }
1863 }
1864
1865
1866
1867 void reset_only_group_left(group_pointer_type const group_pointer) noexcept
1868 {
1869 erasure_groups_head = nullptr;
1870 group_pointer->reset(0, nullptr, nullptr, 0);
1871
1872 // Reset begin and end iterators:
1873 end_iterator.element_pointer = begin_iterator.element_pointer = group_pointer->elements;
1874 end_iterator.skipfield_pointer = begin_iterator.skipfield_pointer = group_pointer->skipfield;
1875 }
1876
1877
1878
1879 void add_group_to_unused_groups_list(group * const group_pointer) noexcept
1880 {
1881 group_pointer->next_group = unused_groups_head;
1882 unused_groups_head = group_pointer;
1883 }
1884
1885
1886
1887public:
1888
1889 // must return iterator to subsequent non-erased element (or end()), in case the group containing the element which the iterator points to becomes empty after the erasure, and is thereafter removed from the hive chain, making the current iterator invalid and unusable in a ++ operation:
1890 iterator erase(const const_iterator it) // if uninitialized/invalid iterator supplied, function could generate an exception
1891 {
1892 assert(total_size != 0);
1893 assert(it.group_pointer != nullptr); // ie. not uninitialized iterator
1894 assert(it.element_pointer != pointer_cast<aligned_pointer_type>(it.group_pointer->skipfield)); // ie. != end()
1895 assert(*(it.skipfield_pointer) == 0); // ie. element pointed to by iterator has not been erased previously
1896
1897 if constexpr (!std::is_trivially_destructible<element_type>::value) // Avoid the function call if possible
1898 {
1899 destroy_element(it.element_pointer);
1900 }
1901
1902 --total_size;
1903
1904 if (it.group_pointer->size-- != 1) // ie. non-empty group at this point in time, don't consolidate - optimization note: GCC optimizes postfix - 1 comparison better than prefix - 1 comparison in some cases.
1905 {
1906 // Code logic for following section:
1907 // ---------------------------------
1908 // If current skipfield node has no skipblock on either side, create new skipblock of size 1
1909 // If node only has skipblock on left, set current node and start node of the skipblock to left node value + 1.
1910 // If node only has skipblock on right, make this node the start node of the skipblock and update end node
1911 // If node has skipblocks on left and right, set start node of left skipblock and end node of right skipblock to the values of the left + right nodes + 1
1912
1913 // Optimization explanation:
1914 // The contextual logic below is the same as that in the insert() functions but in this case the value of the current skipfield node will always be
1915 // zero (since it is not yet erased), meaning no additional manipulations are necessary for the previous skipfield node comparison - we only have to check against zero
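 // Worked example (illustrative): for a group with skipfield [0 0 2 2 0 0 0 0] (elements at indexes 2-3 erased),
 // erasing the element at index 4 takes the 'previous erased consecutive elements' branch below, producing
 // [0 0 3 2 3 0 0 0] - the start and end nodes of a skipblock store its length, while middle nodes need only be non-zero.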
1916 const char prev_skipfield = *(it.skipfield_pointer - (it.skipfield_pointer != it.group_pointer->skipfield)) != 0; // true if previous node is erased or this node is at beginning of skipfield
1917 const char after_skipfield = *(it.skipfield_pointer + 1) != 0; // NOTE: boundary test (checking against end-of-elements) is able to be skipped due to the extra skipfield node (compared to element field) - which is present to enable faster iterator operator ++ operations
1918 skipfield_type update_value = 1;
1919
1920 if (!(prev_skipfield | after_skipfield)) // no consecutive erased elements
1921 {
1922 *it.skipfield_pointer = 1; // solo skipped node
1923 const skipfield_type index = static_cast<skipfield_type>(it.element_pointer - it.group_pointer->elements);
1924
1925 if (it.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max()) // ie. if this group already has some erased elements
1926 {
1927 edit_free_list_next(it.group_pointer->elements + it.group_pointer->free_list_head, index); // set prev free list head's 'next index' number to the index of the current element
1928 }
1929 else
1930 {
1931 it.group_pointer->erasures_list_next_group = erasure_groups_head; // add it to the groups-with-erasures free list
1932
1933 if (erasure_groups_head != nullptr)
1934 {
1935 erasure_groups_head->erasures_list_previous_group = it.group_pointer;
1936 }
1937
1938 erasure_groups_head = it.group_pointer;
1939 }
1940
1941 edit_free_list_head(it.element_pointer, it.group_pointer->free_list_head);
1942 it.group_pointer->free_list_head = index;
1943 }
1944 else if (prev_skipfield & (!after_skipfield)) // previous erased consecutive elements, none following
1945 {
1946 *(it.skipfield_pointer - *(it.skipfield_pointer - 1)) = *it.skipfield_pointer = static_cast<skipfield_type>(*(it.skipfield_pointer - 1) + 1);
1947 }
1948 else if ((!prev_skipfield) & after_skipfield) // following erased consecutive elements, none preceding
1949 {
1950 const skipfield_type following_value = static_cast<skipfield_type>(*(it.skipfield_pointer + 1) + 1);
1951 *(it.skipfield_pointer + following_value - 1) = *(it.skipfield_pointer) = following_value;
1952
1953 const skipfield_type following_previous = *(pointer_cast<skipfield_pointer_type>(it.element_pointer + 1));
1954 const skipfield_type following_next = *(pointer_cast<skipfield_pointer_type>(it.element_pointer + 1) + 1);
1955 edit_free_list_prev(it.element_pointer, following_previous);
1956 edit_free_list_next(it.element_pointer, following_next);
1957
1958 const skipfield_type index = static_cast<skipfield_type>(it.element_pointer - it.group_pointer->elements);
1959
1960 if (following_previous != std::numeric_limits<skipfield_type>::max())
1961 {
1962 edit_free_list_next(it.group_pointer->elements + following_previous, index); // Set next index of previous free list node to this node's 'next' index
1963 }
1964
1965 if (following_next != std::numeric_limits<skipfield_type>::max())
1966 {
1967 edit_free_list_prev(it.group_pointer->elements + following_next, index); // Set previous index of next free list node to this node's 'previous' index
1968 }
1969 else
1970 {
1971 it.group_pointer->free_list_head = index;
1972 }
1973
1974 update_value = following_value;
1975 }
1976 else // both preceding and following consecutive erased elements - erased element is between two skipblocks
1977 {
1978 *(it.skipfield_pointer) = 1; // This line necessary in order for get_iterator() to work - ensures that erased element skipfield nodes are always non-zero
1979 const skipfield_type preceding_value = *(it.skipfield_pointer - 1);
1980 const skipfield_type following_value = static_cast<skipfield_type>(*(it.skipfield_pointer + 1) + 1);
1981
1982 // Join the skipblocks
1983 *(it.skipfield_pointer - preceding_value) = *(it.skipfield_pointer + following_value - 1) = static_cast<skipfield_type>(preceding_value + following_value);
1984
1985 // Remove the following skipblock's entry from the free list
1986 const skipfield_type following_previous = *(pointer_cast<skipfield_pointer_type>(it.element_pointer + 1));
1987 const skipfield_type following_next = *(pointer_cast<skipfield_pointer_type>(it.element_pointer + 1) + 1);
1988
1989 if (following_previous != std::numeric_limits<skipfield_type>::max())
1990 {
1991 edit_free_list_next(it.group_pointer->elements + following_previous, following_next); // Set next index of previous free list node to this node's 'next' index
1992 }
1993
1994 if (following_next != std::numeric_limits<skipfield_type>::max())
1995 {
1996 edit_free_list_prev(it.group_pointer->elements + following_next, following_previous); // Set previous index of next free list node to this node's 'previous' index
1997 }
1998 else
1999 {
2000 it.group_pointer->free_list_head = following_previous;
2001 }
2002
2003 update_value = following_value;
2004 }
2005
2006 iterator return_iterator(it.group_pointer, it.element_pointer + update_value, it.skipfield_pointer + update_value);
2007
2008 if (return_iterator.element_pointer == pointer_cast<aligned_pointer_type>(it.group_pointer->skipfield) && it.group_pointer != end_iterator.group_pointer)
2009 {
2010 return_iterator.group_pointer = it.group_pointer->next_group;
2011 const aligned_pointer_type elements = return_iterator.group_pointer->elements;
2012 const skipfield_pointer_type skipfield = return_iterator.group_pointer->skipfield;
2013 return_iterator.element_pointer = elements + *skipfield;
2014 return_iterator.skipfield_pointer = skipfield + *skipfield;
2015 }
2016
2017 if (it.element_pointer == begin_iterator.element_pointer) // If the original iterator was the first element in the hive, update its value to the next non-erased element:
2018 {
2019 begin_iterator = return_iterator;
2020 }
2021
2022 return return_iterator;
2023 }
2024
2025 // else: group is empty, consolidate groups
2026 const bool in_back_block = (it.group_pointer->next_group == nullptr), in_front_block = (it.group_pointer == begin_iterator.group_pointer);
2027
2028 if (in_back_block & in_front_block) // ie. only group in hive
2029 {
2030 // Reset skipfield and free list rather than clearing - leads to fewer allocations/deallocations:
2031 reset_only_group_left(it.group_pointer);
2032 return end_iterator;
2033 }
2034 else if ((!in_back_block) & in_front_block) // ie. Remove first group, change first group to next group
2035 {
2036 it.group_pointer->next_group->previous_group = nullptr; // Cut off this group from the chain
2037 begin_iterator.group_pointer = it.group_pointer->next_group; // Make the next group the first group
2038
2039 if (it.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max()) // Erasures present within the group, ie. was part of the linked list of groups with erasures.
2040 {
2041 remove_from_groups_with_erasures_list(it.group_pointer);
2042 }
2043
2044 total_capacity -= it.group_pointer->capacity;
2045 deallocate_group(it.group_pointer);
2046
2047 // note: end iterator only needs to be changed if the deleted group was the final group in the chain ie. not in this case
2048 begin_iterator.element_pointer = begin_iterator.group_pointer->elements + *(begin_iterator.group_pointer->skipfield); // If the beginning index has been erased (ie. skipfield != 0), skip to next non-erased element
2049 begin_iterator.skipfield_pointer = begin_iterator.group_pointer->skipfield + *(begin_iterator.group_pointer->skipfield);
2050
2051 return begin_iterator;
2052 }
2053 else if (!(in_back_block | in_front_block)) // this is a non-first group but not final group in chain: delete the group, then link previous group to the next group in the chain:
2054 {
2055 it.group_pointer->next_group->previous_group = it.group_pointer->previous_group;
2056 group_pointer_type const return_group = it.group_pointer->previous_group->next_group = it.group_pointer->next_group; // close the chain, removing this group from it
2057
2058 if (it.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max())
2059 {
2060 remove_from_groups_with_erasures_list(it.group_pointer);
2061 }
2062
2063 if (it.group_pointer->next_group != end_iterator.group_pointer)
2064 {
2065 total_capacity -= it.group_pointer->capacity;
2066 deallocate_group(it.group_pointer);
2067 }
2068 else
2069 {
2070 add_group_to_unused_groups_list(it.group_pointer);
2071 }
2072
2073 // Return next group's first non-erased element:
2074 return iterator(return_group, return_group->elements + *(return_group->skipfield), return_group->skipfield + *(return_group->skipfield));
2075 }
2076 else // this is a non-first group and the final group in the chain
2077 {
2078 if (it.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max())
2079 {
2080 remove_from_groups_with_erasures_list(it.group_pointer);
2081 }
2082
2083 it.group_pointer->previous_group->next_group = nullptr;
2084 end_iterator.group_pointer = it.group_pointer->previous_group; // end iterator needs to be changed as element supplied was the back element of the hive
2085 end_iterator.element_pointer = pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield);
2086 end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->capacity;
2087
2088 add_group_to_unused_groups_list(it.group_pointer);
2089
2090 return end_iterator;
2091 }
2092 }
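 // A usage sketch for the single-element erase above (hypothetical example, not part of the original source):
 //
 // plf::hive<int> h = {1, 2, 3, 4};
 // for (auto it = h.begin(); it != h.end();)
 // {
 // if (*it % 2 == 0) it = h.erase(it); // must capture the returned iterator
 // else ++it;
 // }
 //
 // Capturing the return value is necessary because erasing the last element in a group can remove that
 // group from the chain, invalidating the supplied iterator for ++ operations.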
2093
2094
2095
2096 // Range erase:
2097
2098 iterator erase(const const_iterator iterator1, const const_iterator iterator2) // if uninitialized/invalid iterators supplied, function could generate an exception. If iterator1 > iterator2, behaviour is undefined.
2099 {
2100 assert(iterator1 <= iterator2);
2101
2102 const_iterator current = iterator1;
2103
2104 if (current.group_pointer != iterator2.group_pointer) // ie. if start and end iterators are in separate groups
2105 {
2106 if (current.element_pointer != current.group_pointer->elements + *(current.group_pointer->skipfield)) // if iterator1 is not the first non-erased element in its group - most common case
2107 {
2108 size_type number_of_group_erasures = 0;
2109
2110 // Now update skipfield:
2111 const aligned_pointer_type end = pointer_cast<aligned_pointer_type>(iterator1.group_pointer->skipfield);
2112
2113 // Schema: first erase all non-erased elements until end of group & remove all skipblocks post-iterator1 from the free_list. Then, either update preceding skipblock or create new one:
2114
2115 if (std::is_trivially_destructible<element_type>::value && current.group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max())
2116 {
2117 number_of_group_erasures += static_cast<size_type>(end - current.element_pointer);
2118 }
2119 else
2120 {
2121 while (current.element_pointer != end)
2122 {
2123 if (*current.skipfield_pointer == 0)
2124 {
2125 if constexpr (!std::is_trivially_destructible<element_type>::value)
2126 {
2127 destroy_element(current.element_pointer);
2128 }
2129
2130 ++number_of_group_erasures;
2131 ++current.element_pointer;
2132 ++current.skipfield_pointer;
2133 }
2134 else // remove skipblock from group:
2135 {
2136 const skipfield_type prev_free_list_index = *(pointer_cast<skipfield_pointer_type>(current.element_pointer));
2137 const skipfield_type next_free_list_index = *(pointer_cast<skipfield_pointer_type>(current.element_pointer) + 1);
2138
2139 current.element_pointer += *(current.skipfield_pointer);
2140 current.skipfield_pointer += *(current.skipfield_pointer);
2141
2142 if (next_free_list_index == std::numeric_limits<skipfield_type>::max() && prev_free_list_index == std::numeric_limits<skipfield_type>::max()) // if this is the last skipblock in the free list
2143 {
2144 remove_from_groups_with_erasures_list(iterator1.group_pointer); // remove group from list of free-list groups - will be added back in down below, but not worth optimizing for
2145 iterator1.group_pointer->free_list_head = std::numeric_limits<skipfield_type>::max();
2146 number_of_group_erasures += static_cast<size_type>(end - current.element_pointer);
2147
2148 if constexpr (!std::is_trivially_destructible<element_type>::value)
2149 {
2150 while (current.element_pointer != end) // miniloop - avoid checking skipfield for rest of elements in group, as there are no more skipped elements now
2151 {
2152 destroy_element(current.element_pointer++);
2153 }
2154 }
2155
2156 break; // end overall while loop
2157 }
2158 else if (next_free_list_index == std::numeric_limits<skipfield_type>::max()) // if this is the head of the free list
2159 {
2160 current.group_pointer->free_list_head = prev_free_list_index; // make free list head equal to next free list node
2161 edit_free_list_next(current.group_pointer->elements + prev_free_list_index, std::numeric_limits<skipfield_type>::max());
2162 }
2163 else // either a tail or middle free list node
2164 {
2165 edit_free_list_prev(current.group_pointer->elements + next_free_list_index, prev_free_list_index);
2166
2167 if (prev_free_list_index != std::numeric_limits<skipfield_type>::max()) // ie. not the tail free list node
2168 {
2169 edit_free_list_next(current.group_pointer->elements + prev_free_list_index, next_free_list_index);
2170 }
2171 }
2172 }
2173 }
2174 }
2175
2176
2177 const skipfield_type previous_node_value = *(iterator1.skipfield_pointer - 1); // safe to do this here as we've already established that we're not at start of skipfield
2178 const skipfield_type distance_to_end = static_cast<skipfield_type>(end - iterator1.element_pointer);
2179
2180 if (previous_node_value == 0) // no previous skipblock
2181 {
2182 *iterator1.skipfield_pointer = distance_to_end; // set start node value
2183 *(iterator1.skipfield_pointer + distance_to_end - 1) = distance_to_end; // set end node value
2184
2185 const skipfield_type index = static_cast<skipfield_type>(iterator1.element_pointer - iterator1.group_pointer->elements);
2186
2187 if (iterator1.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max()) // ie. if this group already has some erased elements
2188 {
2189 edit_free_list_next(iterator1.group_pointer->elements + iterator1.group_pointer->free_list_head, index); // set prev free list head's 'next index' number to the index of the iterator1 element
2190 }
2191 else
2192 {
2193 iterator1.group_pointer->erasures_list_next_group = erasure_groups_head; // add it to the groups-with-erasures free list
2194
2195 if (erasure_groups_head != nullptr)
2196 {
2197 erasure_groups_head->erasures_list_previous_group = iterator1.group_pointer;
2198 }
2199
2200 erasure_groups_head = iterator1.group_pointer;
2201 }
2202
2203 edit_free_list_head(iterator1.element_pointer, iterator1.group_pointer->free_list_head);
2204 iterator1.group_pointer->free_list_head = index;
2205 }
2206 else
2207 { // update previous skipblock, no need to update free list:
2208 *(iterator1.skipfield_pointer - previous_node_value) = *(iterator1.skipfield_pointer + distance_to_end - 1) = static_cast<skipfield_type>(previous_node_value + distance_to_end);
2209 }
2210
2211 if (distance_to_end > 2) // if the skipblock is longer than 2 nodes, fill in the middle nodes with non-zero values so that get_iterator() and is_active() will work
2212 {
2213 std::memset(static_cast<void *>(std::to_address(iterator1.skipfield_pointer) + 1), 1, sizeof(skipfield_type) * (distance_to_end - 2));
2214 }
2215
2216 iterator1.group_pointer->size = static_cast<skipfield_type>(iterator1.group_pointer->size - number_of_group_erasures);
2217 total_size -= number_of_group_erasures;
2218
2219 current.group_pointer = current.group_pointer->next_group;
2220 }
2221
2222
2223 // Intermediate groups:
2224 const group_pointer_type previous_group = current.group_pointer->previous_group;
2225
2226 while (current.group_pointer != iterator2.group_pointer)
2227 {
2228 if constexpr (!std::is_trivially_destructible<element_type>::value)
2229 {
2230 current.element_pointer = current.group_pointer->elements + *(current.group_pointer->skipfield);
2231 current.skipfield_pointer = current.group_pointer->skipfield + *(current.group_pointer->skipfield);
2232 const aligned_pointer_type end = pointer_cast<aligned_pointer_type>(current.group_pointer->skipfield);
2233
2234 do
2235 {
2236 destroy_element(current.element_pointer);
2237 current.element_pointer += static_cast<size_type>(*(++current.skipfield_pointer)) + 1u;
2238 current.skipfield_pointer += *(current.skipfield_pointer);
2239 } while (current.element_pointer != end);
2240 }
2241
2242 if (current.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max())
2243 {
2244 remove_from_groups_with_erasures_list(current.group_pointer);
2245 }
2246
2247 total_size -= current.group_pointer->size;
2248 const group_pointer_type current_group = current.group_pointer;
2249 current.group_pointer = current.group_pointer->next_group;
2250
2251 if (current_group != end_iterator.group_pointer && current_group->next_group != end_iterator.group_pointer)
2252 {
2253 total_capacity -= current_group->capacity;
2254 deallocate_group(current_group);
2255 }
2256 else
2257 {
2258 add_group_to_unused_groups_list(current_group);
2259 }
2260 }
2261
2262 current.element_pointer = current.group_pointer->elements + *(current.group_pointer->skipfield);
2263 current.skipfield_pointer = current.group_pointer->skipfield + *(current.group_pointer->skipfield);
2264 current.group_pointer->previous_group = previous_group; // Join this group to the last non-removed group
2265
2266 if (previous_group != nullptr)
2267 {
2268 previous_group->next_group = current.group_pointer;
2269 }
2270 else
2271 {
2272 begin_iterator = iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer); // This line is included here primarily to avoid a secondary if statement within the if block below - it will not be needed in any other situation
2273 }
2274 }
2275
2276
2277 // Final group:
2278 // Code explanation:
2279 // If not erasing entire final group, 1. Destruct elements (if non-trivial destructor) and add locations to group free list. 2. process skipfield.
2280 // If erasing entire group, 1. Destruct elements (if non-trivial destructor), 2. if no elements left in hive, reset the group 3. otherwise reset end_iterator and remove group from groups-with-erasures list (if free list of erasures present)
2281
2282 if (current.element_pointer != iterator2.element_pointer) // in case iterator2 was at beginning of its group - also covers empty range case (first == last)
2283 {
2284 if (iterator2.element_pointer != end_iterator.element_pointer || current.element_pointer != current.group_pointer->elements + *(current.group_pointer->skipfield)) // ie. not erasing entire group. Note: logistically the only way the entire group can be erased here is if iterator2 == end - otherwise would be caught by the if block above. Second condition in this if statement only possibly applies if iterator1.group_pointer == iterator2.group_pointer
2285 {
2286 size_type number_of_group_erasures = 0;
2287 // Schema: first erase all non-erased elements until end of group & remove all skipblocks post-iterator2 from the free_list. Then, either update preceding skipblock or create new one:
2288
2289 const const_iterator current_saved = current;
2290
2291 if (std::is_trivially_destructible<element_type>::value && current.group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max())
2292 {
2293 number_of_group_erasures += static_cast<size_type>(iterator2.element_pointer - current.element_pointer);
2294 }
2295 else
2296 {
2297 while (current.element_pointer != iterator2.element_pointer)
2298 {
2299 if (*current.skipfield_pointer == 0)
2300 {
2301 if constexpr (!std::is_trivially_destructible<element_type>::value)
2302 {
2303 destroy_element(current.element_pointer);
2304 }
2305
2306 ++number_of_group_erasures;
2307 ++current.element_pointer;
2308 ++current.skipfield_pointer;
2309 }
2310 else // remove skipblock from group:
2311 {
2312 const skipfield_type prev_free_list_index = *(pointer_cast<skipfield_pointer_type>(current.element_pointer));
2313 const skipfield_type next_free_list_index = *(pointer_cast<skipfield_pointer_type>(current.element_pointer) + 1);
2314
2315 current.element_pointer += *(current.skipfield_pointer);
2316 current.skipfield_pointer += *(current.skipfield_pointer);
2317
2318 if (next_free_list_index == std::numeric_limits<skipfield_type>::max() && prev_free_list_index == std::numeric_limits<skipfield_type>::max()) // if this is the last skipblock in the free list
2319 {
2320 remove_from_groups_with_erasures_list(iterator2.group_pointer); // remove group from list of free-list groups - will be added back in down below, but not worth optimizing for
2321 iterator2.group_pointer->free_list_head = std::numeric_limits<skipfield_type>::max();
2322 number_of_group_erasures += static_cast<size_type>(iterator2.element_pointer - current.element_pointer);
2323
2324 if constexpr (!std::is_trivially_destructible<element_type>::value)
2325 {
2326 while (current.element_pointer != iterator2.element_pointer)
2327 {
2328 destroy_element(current.element_pointer++);
2329 }
2330 }
2331
2332 break; // end overall while loop
2333 }
2334 else if (next_free_list_index == std::numeric_limits<skipfield_type>::max()) // if this is the head of the free list
2335 {
2336 current.group_pointer->free_list_head = prev_free_list_index;
2337 edit_free_list_next(current.group_pointer->elements + prev_free_list_index, std::numeric_limits<skipfield_type>::max());
2338 }
2339 else
2340 {
2341 edit_free_list_prev(current.group_pointer->elements + next_free_list_index, prev_free_list_index);
2342
2343 if (prev_free_list_index != std::numeric_limits<skipfield_type>::max()) // ie. not the tail free list node
2344 {
2345 edit_free_list_next(current.group_pointer->elements + prev_free_list_index, next_free_list_index);
2346 }
2347 }
2348 }
2349 }
2350 }
2351
2352
2353 const skipfield_type distance_to_iterator2 = static_cast<skipfield_type>(iterator2.element_pointer - current_saved.element_pointer);
2354 const skipfield_type index = static_cast<skipfield_type>(current_saved.element_pointer - iterator2.group_pointer->elements);
2355
2356 if (index == 0 || *(current_saved.skipfield_pointer - 1) == 0) // element is either at start of group or previous skipfield node is 0
2357 {
2358 *(current_saved.skipfield_pointer) = distance_to_iterator2;
2359 *(iterator2.skipfield_pointer - 1) = distance_to_iterator2;
2360
2361 if (iterator2.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max()) // ie. if this group already has some erased elements
2362 {
2363 edit_free_list_next(iterator2.group_pointer->elements + iterator2.group_pointer->free_list_head, index);
2364 }
2365 else
2366 {
2367 iterator2.group_pointer->erasures_list_next_group = erasure_groups_head; // add it to the groups-with-erasures free list
2368
2369 if (erasure_groups_head != nullptr)
2370 {
2371 erasure_groups_head->erasures_list_previous_group = iterator2.group_pointer;
2372 }
2373
2374 erasure_groups_head = iterator2.group_pointer;
2375 }
2376
2377 edit_free_list_head(current_saved.element_pointer, iterator2.group_pointer->free_list_head);
2378 iterator2.group_pointer->free_list_head = index;
2379 }
2380 else // If iterator 1 & 2 are in same group, but iterator 1 was not at start of group, and previous skipfield node is an end node in a skipblock:
2381 {
2382 // Just update existing skipblock, no need to create new free list node:
2383 const skipfield_type prev_node_value = *(current_saved.skipfield_pointer - 1);
2384 *(current_saved.skipfield_pointer - prev_node_value) = static_cast<skipfield_type>(prev_node_value + distance_to_iterator2);
2385 *(iterator2.skipfield_pointer - 1) = static_cast<skipfield_type>(prev_node_value + distance_to_iterator2);
2386 }
2387
2388
2389 if (distance_to_iterator2 > 2) // if the skipblock is longer than 2 nodes, fill in the middle nodes with non-zero values so that get_iterator() and is_active() will work
2390 {
2391 std::memset(static_cast<void *>(std::to_address(current_saved.skipfield_pointer) + 1), 1, sizeof(skipfield_type) * (distance_to_iterator2 - 2));
2392 }
2393
2394 if (iterator1.element_pointer == begin_iterator.element_pointer)
2395 {
2396 begin_iterator = iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer);
2397 }
2398
2399 iterator2.group_pointer->size = static_cast<skipfield_type>(iterator2.group_pointer->size - number_of_group_erasures);
2400 total_size -= number_of_group_erasures;
2401 }
2402 else // ie. full group erasure
2403 {
2404 if constexpr (!std::is_trivially_destructible<element_type>::value)
2405 {
2406 while(current.element_pointer != iterator2.element_pointer)
2407 {
2408 destroy_element(current.element_pointer);
2409 current.element_pointer += static_cast<size_type>(*(++current.skipfield_pointer)) + 1u;
2410 current.skipfield_pointer += *(current.skipfield_pointer);
2411 }
2412 }
2413
2414 if ((total_size -= current.group_pointer->size) != 0) // ie. previous_group != nullptr - hive is not empty
2415 {
2416 if (current.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max())
2417 {
2418 remove_from_groups_with_erasures_list(current.group_pointer);
2419 }
2420
2421 current.group_pointer->previous_group->next_group = current.group_pointer->next_group;
2422
2423 end_iterator.group_pointer = current.group_pointer->previous_group;
2424 end_iterator.element_pointer = pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield);
2425 end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->capacity;
2426 add_group_to_unused_groups_list(current.group_pointer);
2427 }
2428 else // ie. hive is now empty
2429 {
2430 // Reset skipfield and free list rather than clearing - leads to fewer allocations/deallocations:
2431 reset_only_group_left(current.group_pointer);
2432 }
2433
2434 return end_iterator;
2435 }
2436 }
2437
2438 return iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer);
2439 }
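 // A usage sketch for the range erase above (hypothetical example, not part of the original source):
 //
 // auto first = h.begin(), last = h.begin();
 // std::advance(first, 2);
 // std::advance(last, 10);
 // h.erase(first, last); // erases the half-open range [first, last) - eight elements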
2440
2441
2442
2443private:
2444
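 // Destroys all existing elements (where non-trivially-destructible) and restructures the hive's groups so that
 // total capacity approximates 'size', in preparation for the assign functions below re-filling the groups: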
2445 void prepare_groups_for_assign(const size_type size)
2446 {
2447 // Destroy all elements if non-trivial:
2448 if constexpr (!std::is_trivially_destructible<element_type>::value)
2449 {
2450 for (iterator current = begin_iterator; current != end_iterator; ++current)
2451 {
2452 destroy_element(current.element_pointer);
2453 }
2454 }
2455
2456 if (size < total_capacity && (total_capacity - size) >= min_block_capacity)
2457 {
2458 size_type difference = total_capacity - size;
2459 end_iterator.group_pointer->next_group = unused_groups_head;
2460
2461 // Remove surplus groups which are under the difference limit:
2462 group_pointer_type current_group = begin_iterator.group_pointer, previous_group = nullptr;
2463
2464 do
2465 {
2466 const group_pointer_type next_group = current_group->next_group;
2467
2468 if (current_group->capacity <= difference)
2469 { // Remove group:
2470 difference -= current_group->capacity;
2471 total_capacity -= current_group->capacity;
2472 deallocate_group(current_group);
2473
2474 if (current_group == begin_iterator.group_pointer)
2475 {
2476 begin_iterator.group_pointer = next_group;
2477 }
2478 }
2479 else
2480 {
2481 if (previous_group != nullptr)
2482 {
2483 previous_group->next_group = current_group;
2484 }
2485
2486 previous_group = current_group;
2487 }
2488
2489 current_group = next_group;
2490 } while (current_group != nullptr);
2491
2492 previous_group->next_group = nullptr;
2493 }
2494 else
2495 {
2496 if (size > total_capacity)
2497 {
2498 reserve(size);
2499 }
2500
2501 // Join all unused_groups to main chain:
2502 end_iterator.group_pointer->next_group = unused_groups_head;
2503 }
2504
2505 begin_iterator.element_pointer = begin_iterator.group_pointer->elements;
2506 begin_iterator.skipfield_pointer = begin_iterator.group_pointer->skipfield;
2507 erasure_groups_head = nullptr;
2508 total_size = 0;
2509 }
2510
2511
2512public:
2513
2514
2515 // Fill assign:
2516
2517 void assign(const size_type size, const element_type &element)
2518 {
2519 if (size == 0)
2520 {
2521 reset();
2522 return;
2523 }
2524
2525 prepare_groups_for_assign(size);
2526 fill_unused_groups(size, element, 0, nullptr, begin_iterator.group_pointer);
2527 }
2528
2529
2530
2531private:
2532
2533 // Range assign core:
2534
2535 template <class iterator_type>
2536 void range_assign(const iterator_type it, const size_type size)
2537 {
2538 if (size == 0)
2539 {
2540 reset();
2541 return;
2542 }
2543
2544 prepare_groups_for_assign(size);
2545 range_fill_unused_groups(size, it, 0, nullptr, begin_iterator.group_pointer);
2546 }
2547
2548
2549
2550
2551public:
2552
2553 // Range assign:
2554
2555 template <class iterator_type>
2556 void assign(const typename std::enable_if_t<!std::numeric_limits<iterator_type>::is_integer, iterator_type> first, const iterator_type last)
2557 {
2558 range_assign(first, static_cast<size_type>(std::distance(first, last)));
2559 }
2560
2561
2562
2563 // Range assign, move_iterator overload:
2564
2565 template <class iterator_type>
2566 void assign (const std::move_iterator<iterator_type> first, const std::move_iterator<iterator_type> last)
2567 {
2568 range_assign(first, static_cast<size_type>(std::distance(first.base(),last.base())));
2569 }
2570
2571
2572
2573 // Initializer-list assign:
2574
2575 void assign(const std::initializer_list<element_type> &element_list)
2576 {
2577 range_assign(element_list.begin(), static_cast<size_type>(element_list.size()));
2578 }
2579
2580
2581
2582 template<class range_type>
2583 requires std::ranges::range<range_type>
2584 void assign_range(range_type &&the_range)
2585 {
2586 range_assign(std::ranges::begin(the_range), static_cast<size_type>(std::ranges::distance(the_range)));
2587 }
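 // A usage sketch for the assign overloads above (hypothetical example, not part of the original source):
 //
 // plf::hive<std::string> h;
 // h.assign(3, "abc"); // fill assign: replaces contents with three copies of "abc"
 // h.assign({"x", "y", "z"}); // initializer-list assign
 // std::vector<std::string> v = {"a", "b"};
 // h.assign_range(v); // ranges overload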
2588
2589
2590
2591 [[nodiscard]] bool empty() const noexcept
2592 {
2593 return total_size == 0;
2594 }
2595
2596
2597
2598 size_type size() const noexcept
2599 {
2600 return total_size;
2601 }
2602
2603
2604
2605 size_type max_size() const noexcept
2606 {
2607 return std::allocator_traits<allocator_type>::max_size(*this);
2608 }
2609
2610
2611
2612 size_type capacity() const noexcept
2613 {
2614 return total_capacity;
2615 }
2616
2617
2618
2619private:
2620
2621 // get all elements contiguous in memory and shrink to fit, remove erasures and erasure free lists. Invalidates all iterators and pointers to elements.
2622 void consolidate()
2623 {
2624 #ifdef PLF_EXCEPTIONS_SUPPORT
2625 if constexpr (!(std::is_nothrow_move_constructible<element_type>::value && std::is_nothrow_move_assignable<element_type>::value))
2626 {
2627 hive temp(*this);
2628 swap(temp);
2629 }
2630 else
2631 #endif
2632 {
2633 hive temp(hive_limits(min_block_capacity, max_block_capacity));
2634 temp.range_assign(std::make_move_iterator(begin_iterator), total_size);
2635 *this = std::move(temp); // Avoid generating 2nd temporary
2636 }
2637 }
2638
2639
2640
2641public:
2642
2643
2644 void reshape(const hive_limits block_limits)
2645 {
2646 check_capacities_conformance(block_limits);
2647 #ifdef PLF_EXCEPTIONS_SUPPORT
2648 const skipfield_type original_min = min_block_capacity, original_max = max_block_capacity;
2649 #endif
2650 min_block_capacity = static_cast<skipfield_type>(block_limits.min);
2651 max_block_capacity = static_cast<skipfield_type>(block_limits.max);
2652
2653 // Need to check all group sizes here, because splice might append smaller blocks to the end of a larger block:
2654 for (group_pointer_type current_group = begin_iterator.group_pointer; current_group != nullptr; current_group = current_group->next_group)
2655 {
2656 if (current_group->capacity < min_block_capacity || current_group->capacity > max_block_capacity)
2657 {
2658 if constexpr (!(std::is_copy_constructible<element_type>::value || std::is_move_constructible<element_type>::value))
2659 {
2660 throw std::length_error("Current hive's block capacities do not fit within the supplied block limits, therefore reallocation of elements must occur, however user is using a non-copy-constructible/non-move-constructible type.");
2661 }
2662 else
2663 {
2664 #ifdef PLF_EXCEPTIONS_SUPPORT
2665 try
2666 {
2667 consolidate();
2668 }
2669 catch(...)
2670 {
2671 min_block_capacity = original_min;
2672 max_block_capacity = original_max;
2673 throw;
2674 }
2675 #else
2676 consolidate();
2677 #endif
2678 }
2679
2680 return;
2681 }
2682 }
2683
2684 // If a consolidation or throw has not occurred, process reserved/unused groups:
2685 for (group_pointer_type current_group = unused_groups_head, previous_group = nullptr; current_group != nullptr;)
2686 {
2687 const group_pointer_type next_group = current_group->next_group;
2688
2689 if (current_group->capacity < min_block_capacity || current_group->capacity > max_block_capacity)
2690 {
2691 total_capacity -= current_group->capacity;
2692 deallocate_group(current_group);
2693
2694 if (previous_group == nullptr)
2695 {
2696 unused_groups_head = next_group;
2697 }
2698 else
2699 {
2700 previous_group->next_group = next_group;
2701 }
2702 }
2703 else
2704 {
2705 previous_group = current_group;
2706 }
2707
2708 current_group = next_group;
2709 }
2710 }
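 // A usage sketch for reshape (hypothetical example, not part of the original source):
 //
 // plf::hive<int> h;
 // h.reshape(plf::hive_limits(50, 100)); // current and future blocks must hold 50-100 elements
 //
 // If existing blocks fall outside the new limits, their elements are reallocated via consolidate(),
 // invalidating all iterators and pointers to elements.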
2711
2712
2713
2714 hive_limits block_capacity_limits() const noexcept
2715 {
2716 return hive_limits(static_cast<size_t>(min_block_capacity), static_cast<size_t>(max_block_capacity));
2717 }
2718
2719
2720
2721 static constexpr hive_limits block_capacity_hard_limits() noexcept
2722 {
2723 return hive_limits(3, std::numeric_limits<skipfield_type>::max());
2724 }
2725
2726
2727
2728 static constexpr size_type max_block_capacity_per_allocation(const size_type allocation_amount) noexcept
2729 {
2730 // Get a rough approximation of the number of elements + skipfield units we can fit in the amount expressed:
2731 size_type num_units = allocation_amount / (sizeof(aligned_element_struct) + sizeof(skipfield_type));
2732
2733 // Truncate the amount to the implementation's hard block capacity max limit:
2734 if (num_units > std::numeric_limits<skipfield_type>::max())
2735 {
2736 num_units = std::numeric_limits<skipfield_type>::max();
2737 }
2738
2739 // Adjust num_units downward based on (a) the additional skipfield node necessary per-block in this implementation and
2740 // (b) any additional memory waste required in order to allocate the skipfield in multiples of the element type's alignof:
2741 if (( /* Explanation: elements and skipfield are allocated in a single allocation for better performance.
2742 In order for the elements to be correctly aligned in memory, this single allocation is aligned to the alignof
2743 of the element type, so the first line below is the allocation amount in bytes required for the skipfield
2744 when allocated in multiples of the element type's alignof. The + sizeof(skipfield_type) adds the additional skipfield node
2745 as mentioned, and the (num_units + 1) together with the minus-1 byte rounds up the integer division: */
2746 ((((num_units + 1) * sizeof(aligned_allocation_struct)) - 1 + sizeof(skipfield_type)) / sizeof(aligned_allocation_struct))
2747 /* the second line is the amount of memory in bytes necessary for the elements themselves: */
2748 + (num_units * sizeof(aligned_element_struct)))
2749 /* then we compare against the desired allocation amount: */
2750 > allocation_amount)
2751 {
2752 --num_units; // In this implementation it is not possible for the necessary adjustment to be greater than 1 element+skipfield sizeof
2753 }
2754
2755 return num_units;
2756 }
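 // Worked example (illustrative, assuming an 8-byte aligned element type and a 2-byte skipfield_type):
 // max_block_capacity_per_allocation(1000) first approximates 1000 / (8 + 2) == 100 units, then subtracts
 // one unit if the extra per-block skipfield node plus alignment padding pushes the true requirement past 1000 bytes.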
2757
2758
2759
2760 void clear() noexcept
2761 {
2762 if (total_size == 0)
2763 {
2764 return;
2765 }
2766
2767 // Destroy all elements if element type is non-trivial:
2768 if constexpr (!std::is_trivially_destructible<element_type>::value)
2769 {
2770 for (iterator current = begin_iterator; current != end_iterator; ++current)
2771 {
2772 destroy_element(current.element_pointer);
2773 }
2774 }
2775
2776 if (begin_iterator.group_pointer != end_iterator.group_pointer)
2777 { // Move all other groups onto the unused_groups list
2778 end_iterator.group_pointer->next_group = unused_groups_head;
2779 unused_groups_head = begin_iterator.group_pointer->next_group;
2780 end_iterator.group_pointer = begin_iterator.group_pointer; // other parts of iterator reset in the function below
2781 }
2782
2783 reset_only_group_left(begin_iterator.group_pointer);
2784 erasure_groups_head = nullptr;
2785 total_size = 0;
2786 }
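 // Note (illustrative): clear() retains allocated capacity, moving groups to the unused-groups list rather
 // than deallocating them. To release the memory as well:
 //
 // h.clear();
 // h.trim_capacity(); // deallocates the blocks retained by clear()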
2787
2788
2789
2790 hive & operator = (const hive &source)
2791 {
2792 assert(&source != this);
2793
2794 if constexpr (std::allocator_traits<allocator_type>::propagate_on_container_copy_assignment::value)
2795 {
2796 allocator_type source_allocator(source);
2797
2798 if(!std::allocator_traits<allocator_type>::is_always_equal::value && static_cast<allocator_type &>(*this) != source_allocator)
2799 { // Deallocate existing blocks as source allocator is not necessarily able to do so
2800 reset();
2801 }
2802
2803 static_cast<allocator_type &>(*this) = std::move(source_allocator);
2804 // Reconstruct rebinds:
2805 group_allocator = group_allocator_type(*this);
2806 aligned_struct_allocator = aligned_struct_allocator_type(*this);
2807 skipfield_allocator = skipfield_allocator_type(*this);
2808 tuple_allocator = tuple_allocator_type(*this);
2809 }
2810
2811 range_assign(source.begin_iterator, source.total_size);
2812 return *this;
2813 }
2814
2815
2816
2817 // Move assignment
2818 hive & operator = (hive &&source) noexcept(std::allocator_traits<allocator_type>::propagate_on_container_move_assignment::value || std::allocator_traits<allocator_type>::is_always_equal::value)
2819 {
2820 assert(&source != this);
2821 destroy_all_data();
2822
2823 if (std::allocator_traits<allocator_type>::propagate_on_container_move_assignment::value || std::allocator_traits<allocator_type>::is_always_equal::value || static_cast<allocator_type &>(*this) == static_cast<allocator_type &>(source))
2824 {
2825 if constexpr ((std::is_trivially_copyable<allocator_type>::value || std::allocator_traits<allocator_type>::is_always_equal::value) &&
2826 std::is_trivial<group_pointer_type>::value && std::is_trivial<aligned_pointer_type>::value && std::is_trivial<skipfield_pointer_type>::value)
2827 {
2828 std::memcpy(static_cast<void *>(this), &source, sizeof(hive));
2829 }
2830 else
2831 {
2832 end_iterator = std::move(source.end_iterator);
2833 begin_iterator = std::move(source.begin_iterator);
2834 erasure_groups_head = std::move(source.erasure_groups_head);
2835 unused_groups_head = std::move(source.unused_groups_head);
2836 total_size = source.total_size;
2837 total_capacity = source.total_capacity;
2838 min_block_capacity = source.min_block_capacity;
2839 max_block_capacity = source.max_block_capacity;
2840
2841 if constexpr(std::allocator_traits<allocator_type>::propagate_on_container_move_assignment::value)
2842 {
2843 static_cast<allocator_type &>(*this) = std::move(static_cast<allocator_type &>(source));
2844 // Reconstruct rebinds:
2845 group_allocator = group_allocator_type(*this);
2846 aligned_struct_allocator = aligned_struct_allocator_type(*this);
2847 skipfield_allocator = skipfield_allocator_type(*this);
2848 tuple_allocator = tuple_allocator_type(*this);
2849 }
2850 }
2851 }
2852 else // Cannot take ownership of the source's blocks (the allocator does not propagate on move-assignment and is not equal to the source's), so move elements individually and deallocate the source's blocks:
2853 {
2854 range_assign(std::make_move_iterator(source.begin_iterator), source.total_size);
2855 source.destroy_all_data();
2856 }
2857
2858 source.blank();
2859 return *this;
2860 }
2861
2862
2863
2864 hive & operator = (const std::initializer_list<element_type> &element_list)
2865 {
2866 range_assign(element_list.begin(), static_cast<size_type>(element_list.size()));
2867 return *this;
2868 }
2869
2870
2871
2872 void shrink_to_fit()
2873 {
2874 if (total_size == total_capacity)
2875 {
2876 return;
2877 }
2878 else if (total_size == 0)
2879 {
2880 reset();
2881 return;
2882 }
2883
2884 consolidate();
2885 }
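// Annotation (not part of the original source): unlike trim_capacity() below, shrink_to_fit()
// consolidates elements into new blocks when size < capacity, which moves elements and
// therefore invalidates iterators and pointers into the hive. Sketch, assuming plf::hive<int> h:
//   h.reserve(1000);
//   h.insert(42);
//   h.shrink_to_fit();    // capacity shrinks toward size; prior iterators invalidated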
2886
2887
2888
2889 void trim_capacity() noexcept
2890 {
2891 if (end_iterator.element_pointer == nullptr) // empty hive
2892 {
2893 return;
2894 }
2895
2896 while(unused_groups_head != nullptr)
2897 {
2898 total_capacity -= unused_groups_head->capacity;
2899 const group_pointer_type next_group = unused_groups_head->next_group;
2900 deallocate_group(unused_groups_head);
2901 unused_groups_head = next_group;
2902 }
2903
2904 if (begin_iterator.element_pointer == end_iterator.element_pointer) // ie. clear() has been called prior
2905 {
2906 deallocate_group(begin_iterator.group_pointer);
2907 blank();
2908 }
2909 }
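// Annotation (not part of the original source): this overload only deallocates wholly-unused
// reserved blocks, so existing elements, pointers and iterators remain valid:
//   h.reserve(1000);      // allocate reserved blocks
//   h.trim_capacity();    // free them again; blocks containing elements are untouched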
2910
2911
2912
2913 void trim_capacity(const size_type capacity_retain) noexcept
2914 {
2915 const size_type capacity_difference = total_capacity - capacity_retain;
2916
2917 if (end_iterator.element_pointer == nullptr || total_capacity <= capacity_retain || total_size >= capacity_retain || capacity_difference < min_block_capacity)
2918 {
2919 return;
2920 }
2921
2922 size_type number_of_elements_to_remove = capacity_difference;
2923
2924 for (group_pointer_type current_group = unused_groups_head, previous_group = nullptr; current_group != nullptr;)
2925 {
2926 const group_pointer_type next_group = current_group->next_group;
2927
2928 if (number_of_elements_to_remove >= current_group->capacity)
2929 {
2930 number_of_elements_to_remove -= current_group->capacity;
2931 deallocate_group(current_group);
2932
2933 if (previous_group == nullptr)
2934 {
2935 unused_groups_head = next_group;
2936 }
2937 else
2938 {
2939 previous_group->next_group = next_group;
2940 }
2941
2942 if (number_of_elements_to_remove < min_block_capacity)
2943 {
2944 break;
2945 }
2946 }
2947 else
2948 {
2949 previous_group = current_group;
2950 }
2951
2952 current_group = next_group;
2953 }
2954
2955
2956 if (begin_iterator.element_pointer == end_iterator.element_pointer) // ie. clear() has been called prior
2957 {
2958 if (number_of_elements_to_remove >= begin_iterator.group_pointer->capacity)
2959 {
2960 number_of_elements_to_remove -= begin_iterator.group_pointer->capacity;
2961 deallocate_group(begin_iterator.group_pointer);
2962
2963 if (unused_groups_head != nullptr) // some of the reserved blocks were not removed as they were too large, so use one of these to make the new begin group
2964 {
2965 begin_iterator.group_pointer = unused_groups_head;
2966 begin_iterator.element_pointer = unused_groups_head->elements;
2967 begin_iterator.skipfield_pointer = unused_groups_head->skipfield;
2968 end_iterator = begin_iterator;
2969
2970 unused_groups_head = unused_groups_head->next_group;
2971 begin_iterator.group_pointer->next_group = nullptr;
2972 }
2973 else
2974 {
2975 blank();
2976 return;
2977 }
2978 }
2979 }
2980
2981 total_capacity -= capacity_difference - number_of_elements_to_remove;
2982 }
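// Annotation (not part of the original source): the parameterised overload removes whole
// unused blocks only while total capacity stays at or above capacity_retain, so the
// retained capacity may end up somewhat higher than requested:
//   h.trim_capacity(256); // keep at least 256 elements' worth of capacity, free the rest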
2983
2984
2985
2986 void reserve(size_type new_capacity)
2987 {
2988 if (new_capacity == 0 || new_capacity <= total_capacity) // We already have enough space allocated
2989 {
2990 return;
2991 }
2992
2993 if (new_capacity > max_size())
2994 {
2995 #ifdef PLF_EXCEPTIONS_SUPPORT
2996 throw std::length_error("Capacity requested via reserve() greater than max_size()");
2997 #else
2998 std::terminate();
2999 #endif
3000 }
3001
3002 new_capacity -= total_capacity;
3003
3004 size_type number_of_max_groups = new_capacity / max_block_capacity;
3005 skipfield_type remainder = static_cast<skipfield_type>(new_capacity - (number_of_max_groups * max_block_capacity));
3006
3007
3008 if (remainder == 0)
3009 {
3010 remainder = max_block_capacity;
3011 --number_of_max_groups;
3012 }
3013 else if (remainder < min_block_capacity)
3014 {
3015 remainder = min_block_capacity;
3016 }
3017
3018
3019 group_pointer_type current_group, first_unused_group;
3020
3021 if (begin_iterator.group_pointer == nullptr) // Most common scenario - empty hive
3022 {
3023 initialize(remainder);
3024 begin_iterator.group_pointer->size = 0; // 1 by default in initialize function (optimised for insert())
3025
3026 if (number_of_max_groups == 0)
3027 {
3028 return;
3029 }
3030 else
3031 {
3032 first_unused_group = current_group = allocate_new_group(max_block_capacity, begin_iterator.group_pointer);
3033 total_capacity += max_block_capacity;
3034 --number_of_max_groups;
3035 }
3036 }
3037 else // Non-empty hive, add first group:
3038 {
3039 if ((std::numeric_limits<size_type>::max() - end_iterator.group_pointer->group_number) < (number_of_max_groups + 1))
3040 {
3041 reset_group_numbers();
3042 }
3043
3044 first_unused_group = current_group = allocate_new_group(remainder, end_iterator.group_pointer);
3045 total_capacity += remainder;
3046 }
3047
3048
3049 while (number_of_max_groups != 0)
3050 {
3051 #ifdef PLF_EXCEPTIONS_SUPPORT
3052 try
3053 {
3054 current_group->next_group = allocate_new_group(max_block_capacity, current_group);
3055 }
3056 catch (...)
3057 {
3058 deallocate_group(current_group->next_group);
3059 current_group->next_group = unused_groups_head;
3060 unused_groups_head = first_unused_group;
3061 throw;
3062 }
3063 #else
3064 current_group->next_group = allocate_new_group(max_block_capacity, current_group);
3065 #endif
3066
3067 current_group = current_group->next_group;
3068 total_capacity += max_block_capacity;
3069 --number_of_max_groups;
3070 }
3071
3072 current_group->next_group = unused_groups_head;
3073 unused_groups_head = first_unused_group;
3074 }
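// Annotation (not part of the original source): reserve() pre-allocates the additional
// capacity as a chain of reserved blocks of max_block_capacity each, plus one remainder
// block clamped to at least min_block_capacity. Sketch:
//   plf::hive<int> h;
//   h.reserve(10000);
//   assert(h.capacity() >= 10000);  // blocks allocated, no elements constructed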
3075
3076
3077
3078private:
3079
3080 template <bool is_const>
3081 hive_iterator<is_const> get_it(const pointer element_pointer) const noexcept
3082 {
3083 const aligned_pointer_type aligned_element_pointer = pointer_cast<aligned_pointer_type>(element_pointer);
3084
3085 // Start with the last group, as it will be the largest group in most cases, so there is a statistically higher chance of the element being in it:
3086 aligned_pointer_type end = end_iterator.element_pointer; // This line is important in case the element was in a group which became empty, got moved to the unused_groups list or was deallocated, and then was re-used. It prevents the function from mistakenly giving an iterator which is beyond the back element of the hive.
3087 for (group_pointer_type current_group = end_iterator.group_pointer; current_group != nullptr; current_group = current_group->previous_group, end = pointer_cast<aligned_pointer_type>(current_group->skipfield))
3088 {
3089 if (std::greater_equal()(aligned_element_pointer, current_group->elements) && std::less()(aligned_element_pointer, end))
3090 {
3091 const skipfield_pointer_type skipfield_pointer = current_group->skipfield + (aligned_element_pointer - current_group->elements);
3092 return (*skipfield_pointer == 0) ? hive_iterator<is_const>(current_group, aligned_element_pointer, skipfield_pointer) : hive_iterator<is_const>(end_iterator);
3093 }
3094 }
3095
3096 return end_iterator;
3097 }
3098
3099
3100
3101public:
3102
3103 iterator get_iterator(const pointer element_pointer) noexcept
3104 {
3105 return get_it<false>(element_pointer);
3106 }
3107
3108
3109
3110 const_iterator get_iterator(const const_pointer element_pointer) const noexcept
3111 {
3112 return get_it<true>(const_cast<pointer>(element_pointer));
3113 }
3114
3115
3116
3117 bool is_active(const const_iterator &it) const noexcept
3118 {
3119 // Schema: check (a) that the group the iterator belongs to is still active and not deallocated or in the unused_groups list, then (b) that the element is not erased. (a) prevents an out-of-bounds memory access if the group is deallocated. Same reasoning as get_iterator for loop conditions
3120 aligned_pointer_type end = end_iterator.element_pointer; // Same reasoning as in get_it()
3121
3122 for (group_pointer_type current_group = end_iterator.group_pointer; current_group != nullptr; current_group = current_group->previous_group, end = pointer_cast<aligned_pointer_type>(current_group->skipfield))
3123 {
3124 if (it.group_pointer == current_group && std::greater_equal()(it.element_pointer, current_group->elements) && std::less()(it.element_pointer, end)) // The 2nd and 3rd conditions are necessary in case the group which contained the element the iterator points to was previously deallocated from the hive, but the same pointer address was then re-supplied by the allocator for a subsequent group allocation (in which case the group's element block memory location may be different)
3125 {
3126 return (*it.skipfield_pointer == 0);
3127 }
3128 }
3129
3130 return false;
3131 }
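// Annotation (not part of the original source): get_iterator() converts a raw element pointer
// back into a valid iterator (or end() if the element is erased/unknown), and is_active()
// reports whether an iterator still refers to a non-erased element in an active block:
//   plf::hive<int>::iterator it = h.insert(7);
//   int *p = &*it;
//   assert(h.get_iterator(p) == it && h.is_active(it));
//   h.erase(it);
//   assert(!h.is_active(it));       // the slot is now marked erased in the skipfield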
3132
3133
3134
3135 allocator_type get_allocator() const noexcept
3136 {
3137 return *this;
3138 }
3139
3140
3141
3142 void splice(hive &&source)
3143 {
3144 // Process: if there are unused memory spaces at the end of the current back group of the chain, convert them
3145 // to skipped elements and add the locations to the group's free list.
3146 // Then link the destination's groups to the source's groups and nullify the source.
3147 // If the source has more unused memory spaces in the back group than the destination, swap them before processing to reduce the number of locations added to a free list and also subsequent jumps during iteration.
3148
3149 assert(&source != this);
3150
3151 if (source.total_size == 0)
3152 {
3153 return;
3154 }
3155
3156 // Throw if incompatible group capacity found:
3157 if (source.min_block_capacity < min_block_capacity || source.max_block_capacity > max_block_capacity)
3158 {
3159 for (group_pointer_type current_group = source.begin_iterator.group_pointer; current_group != nullptr; current_group = current_group->next_group)
3160 {
3161 if (current_group->capacity < min_block_capacity || current_group->capacity > max_block_capacity)
3162 {
3163 #ifdef PLF_EXCEPTIONS_SUPPORT
3164 throw std::length_error("A source memory block capacity is outside of the destination's minimum or maximum memory block capacity limits - please change either the source or the destination's min/max block capacity limits using reshape() before calling splice() in this case");
3165 #else
3166 std::terminate();
3167 #endif
3168 }
3169 }
3170 }
3171
3172 // Preserve original unused_groups so that both source and destination retain theirs in case of swap or std::move below:
3173 group_pointer_type const source_unused_groups = source.unused_groups_head, unused_groups_head_original = unused_groups_head;
3174 source.unused_groups_head = nullptr;
3175 unused_groups_head = nullptr;
3176
3177 if (total_size != 0)
3178 {
3179 // If there are more unused element locations in the back memory block of the destination than in the back memory block of the source, swap with the source to reduce the number of skipped elements during iteration:
3180 if ((pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer) > (pointer_cast<aligned_pointer_type>(source.end_iterator.group_pointer->skipfield) - source.end_iterator.element_pointer))
3181 {
3182 swap(source);
3183
3184 // Swap block capacity limits back to where they were:
3185 const skipfield_type source_hive_limits[2] = {source.min_block_capacity, source.max_block_capacity};
3186 source.min_block_capacity = min_block_capacity;
3187 source.max_block_capacity = max_block_capacity;
3188 min_block_capacity = source_hive_limits[0];
3189 max_block_capacity = source_hive_limits[1];
3190 }
3191
3192
3193 // Add source list of groups-with-erasures to destination list of groups-with-erasures:
3194 if (source.erasure_groups_head != nullptr)
3195 {
3196 if (erasure_groups_head != nullptr)
3197 {
3198 group_pointer_type tail_group = erasure_groups_head;
3199
3200 while (tail_group->erasures_list_next_group != nullptr)
3201 {
3202 tail_group = tail_group->erasures_list_next_group;
3203 }
3204
3205 tail_group->erasures_list_next_group = source.erasure_groups_head;
3206 source.erasure_groups_head->erasures_list_previous_group = tail_group;
3207 }
3208 else
3209 {
3210 erasure_groups_head = source.erasure_groups_head;
3211 }
3212 }
3213
3214
3215 const skipfield_type distance_to_end = static_cast<skipfield_type>(pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer);
3216
3217 if (distance_to_end != 0) // 0 == edge case
3218 { // Mark unused element memory locations from back group as skipped/erased:
3219 // Update skipfield:
3220 const skipfield_type previous_node_value = *(end_iterator.skipfield_pointer - 1);
3221
3222 if (previous_node_value == 0) // no previous skipblock
3223 {
3224 *end_iterator.skipfield_pointer = distance_to_end;
3225 *(end_iterator.skipfield_pointer + distance_to_end - 1) = distance_to_end;
3226
3227 if (distance_to_end > 2) // make erased middle nodes non-zero for get_iterator and is_active
3228 {
3229 std::memset(static_cast<void *>(end_iterator.skipfield_pointer + 1), 1, sizeof(skipfield_type) * (distance_to_end - 2));
3230 }
3231
3232 const skipfield_type index = static_cast<skipfield_type>(end_iterator.element_pointer - end_iterator.group_pointer->elements);
3233
3234 if (end_iterator.group_pointer->free_list_head != std::numeric_limits<skipfield_type>::max()) // ie. if this group already has some erased elements
3235 {
3236 edit_free_list_next(end_iterator.group_pointer->elements + end_iterator.group_pointer->free_list_head, index); // set prev free list head's 'next index' number to the index of the current element
3237 }
3238 else
3239 {
3240 end_iterator.group_pointer->erasures_list_next_group = erasure_groups_head; // add it to the groups-with-erasures free list
3241
3242 if (erasure_groups_head != nullptr)
3243 {
3244 erasure_groups_head->erasures_list_previous_group = end_iterator.group_pointer;
3245 }
3246
3247 erasure_groups_head = end_iterator.group_pointer;
3248 }
3249
3250 edit_free_list_head(end_iterator.element_pointer, end_iterator.group_pointer->free_list_head);
3251 end_iterator.group_pointer->free_list_head = index;
3252 }
3253 else
3254 { // update previous skipblock, no need to update free list:
3255 *(end_iterator.skipfield_pointer - previous_node_value) = *(end_iterator.skipfield_pointer + distance_to_end - 1) = static_cast<skipfield_type>(previous_node_value + distance_to_end);
3256
3257 if (distance_to_end > 1) // make erased middle nodes non-zero for get_iterator and is_active
3258 {
3259 std::memset(static_cast<void *>(end_iterator.skipfield_pointer), 1, sizeof(skipfield_type) * (distance_to_end - 1));
3260 }
3261 }
3262 }
3263
3264
3265 // Join the destination and source group chains:
3266 end_iterator.group_pointer->next_group = source.begin_iterator.group_pointer;
3267 source.begin_iterator.group_pointer->previous_group = end_iterator.group_pointer;
3268
3269 // Update group numbers if necessary:
3270 if (source.begin_iterator.group_pointer->group_number <= end_iterator.group_pointer->group_number)
3271 {
3272 size_type source_group_count = 0;
3273
3274 for (group_pointer_type current_group = source.begin_iterator.group_pointer; current_group != nullptr; current_group = current_group->next_group, ++source_group_count) {}
3275
3276 if ((std::numeric_limits<size_type>::max() - end_iterator.group_pointer->group_number) >= source_group_count)
3277 {
3278 update_subsequent_group_numbers(end_iterator.group_pointer->group_number + 1u, source.begin_iterator.group_pointer);
3279 }
3280 else
3281 {
3282 reset_group_numbers();
3283 }
3284 }
3285
3286 end_iterator = source.end_iterator;
3287 total_size += source.total_size;
3288 total_capacity += source.total_capacity;
3289 }
3290 else // If *this is empty():
3291 {
3292 *this = std::move(source);
3293
3294 // Add capacity for unused_groups back into *this:
3295 for (group_pointer_type current = unused_groups_head_original; current != nullptr; current = current->next_group)
3296 {
3297 total_capacity += current->capacity;
3298 }
3299 }
3300
3301
3302 // Re-link original unused_groups to *this (in case of swap):
3303 unused_groups_head = unused_groups_head_original;
3304
3305 // Reset source values:
3306 source.blank();
3307
3308 if (source_unused_groups != nullptr) // If there were unused_groups in source, re-link them and remove their capacity count from *this:
3309 {
3310 size_type source_unused_groups_capacity = 0;
3311
3312 // Count capacity in source unused_groups:
3313 for (group_pointer_type current = source_unused_groups; current != nullptr; current = current->next_group)
3314 {
3315 source_unused_groups_capacity += current->capacity;
3316 }
3317
3318 total_capacity -= source_unused_groups_capacity;
3319 source.total_capacity = source_unused_groups_capacity;
3320
3321 // Establish first group from source unused_groups as first active group in source, link rest as reserved groups:
3322 source.unused_groups_head = source_unused_groups->next_group;
3323 source.begin_iterator.group_pointer = source_unused_groups;
3324 source.begin_iterator.element_pointer = source_unused_groups->elements;
3325 source.begin_iterator.skipfield_pointer = source_unused_groups->skipfield;
3326 source.end_iterator = source.begin_iterator;
3327 source_unused_groups->reset(0, nullptr, nullptr, 0);
3328 }
3329 }
3330
3331
3332
3333 void splice(hive &source)
3334 {
3335 splice(std::move(source));
3336 }
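// Annotation (not part of the original source): splice() transfers the source's blocks into
// *this by relinking them (no per-element copies or moves in the common case) and leaves the
// source empty but usable; it throws std::length_error if a source block's capacity lies
// outside this hive's block capacity limits. Sketch:
//   plf::hive<int> a = {1, 2, 3}, b = {4, 5, 6};
//   a.splice(b);                    // a now holds all six elements, b is empty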
3337
3338
3339
3340private:
3341
3342 struct item_index_tuple
3343 {
3344 pointer original_location;
3345 size_type original_index;
3346
3347 item_index_tuple(const pointer _item, const size_type _index) noexcept:
3348 original_location(_item),
3349 original_index(_index)
3350 {}
3351 };
3352
3353
3354
3355 template <class comparison_function>
3356 struct sort_dereferencer
3357 {
3358 comparison_function stored_instance;
3359
3360 explicit sort_dereferencer(const comparison_function &function_instance):
3361 stored_instance(function_instance)
3362 {}
3363
3364 bool operator() (const item_index_tuple first, const item_index_tuple second)
3365 {
3366 return stored_instance(*(first.original_location), *(second.original_location));
3367 }
3368 };
3369
3370
3371
3372public:
3373
3374 template <class comparison_function>
3375 void sort(comparison_function compare)
3376 {
3377 if (total_size < 2)
3378 {
3379 return;
3380 }
3381
3382 if constexpr ((std::is_trivially_copyable<element_type>::value || std::is_move_assignable<element_type>::value) && sizeof(element_type) <= sizeof(element_type *) * 2) // If element is <= 2 pointers, just copy to an array and sort that then copy back - consumes less memory and may be faster
3383 {
3384 element_type * const sort_array = std::allocator_traits<allocator_type>::allocate(*this, total_size, nullptr), * const end = sort_array + total_size;
3385
3386 if constexpr (!std::is_trivially_copyable<element_type>::value && std::is_move_assignable<element_type>::value)
3387 {
3388 std::uninitialized_copy(std::make_move_iterator(begin_iterator), std::make_move_iterator(end_iterator), sort_array);
3389 }
3390 else
3391 {
3392 std::uninitialized_copy(begin_iterator, end_iterator, sort_array);
3393 }
3394
3395 std::sort(sort_array, end, compare);
3396
3397 if constexpr (!std::is_trivially_copyable<element_type>::value && std::is_move_assignable<element_type>::value)
3398 {
3399 std::copy(std::make_move_iterator(sort_array), std::make_move_iterator(end), begin_iterator);
3400 }
3401 else
3402 {
3403 std::copy(sort_array, end, begin_iterator);
3404
3405 if (!std::is_trivially_destructible<element_type>::value)
3406 {
3407 for (element_type *current = sort_array; current != end; ++current)
3408 {
3409 std::allocator_traits<allocator_type>::destroy(*this, current);
3410 }
3411 }
3412 }
3413
3414 std::allocator_traits<allocator_type>::deallocate(*this, sort_array, total_size);
3415 }
3416 else
3417 {
3418 tuple_pointer_type const sort_array = std::allocator_traits<tuple_allocator_type>::allocate(tuple_allocator, total_size, nullptr);
3419 tuple_pointer_type tuple_pointer = sort_array;
3420
3421 // Construct pointers to all elements in the sequence:
3422 size_type index = 0;
3423
3424 for (iterator current_element = begin_iterator; current_element != end_iterator; ++current_element, ++tuple_pointer, ++index)
3425 {
3426 std::allocator_traits<tuple_allocator_type>::construct(tuple_allocator, tuple_pointer, &*current_element, index);
3427 }
3428
3429 // Now, sort the pointers by the values they point to:
3430 std::sort(sort_array, tuple_pointer, sort_dereferencer<comparison_function>(compare));
3431
3432 // Sort the actual elements via the tuple array:
3433 index = 0;
3434
3435 for (tuple_pointer_type current_tuple = sort_array; current_tuple != tuple_pointer; ++current_tuple, ++index)
3436 {
3437 if (current_tuple->original_index != index)
3438 {
3439 element_type end_value = std::move(*(current_tuple->original_location));
3440 size_type destination_index = index;
3441 size_type source_index = current_tuple->original_index;
3442
3443 do
3444 {
3445 *(sort_array[destination_index].original_location) = std::move(*(sort_array[source_index].original_location));
3446 destination_index = source_index;
3447 source_index = sort_array[destination_index].original_index;
3448 sort_array[destination_index].original_index = destination_index;
3449 } while (source_index != index);
3450
3451 *(sort_array[destination_index].original_location) = std::move(end_value);
3452 }
3453 }
3454
3455 std::allocator_traits<tuple_allocator_type>::deallocate(tuple_allocator, sort_array, total_size);
3456 }
3457 }
3458
3459
3460
3461 void sort()
3462 {
3463 sort(std::less<element_type>());
3464 }
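// Annotation (not part of the original source): sort() reorders element values in place
// (small trivially-copyable/movable types via a temporary array, other types via an array
// of pointer/index tuples) rather than relinking nodes as std::list::sort does. Sketch:
//   plf::hive<int> h = {3, 1, 2};
//   h.sort();                       // ascending, via std::less
//   h.sort(std::greater<int>());    // descending, via a user-supplied comparator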
3465
3466
3467
3468 template <class comparison_function>
3469 size_type unique(comparison_function compare)
3470 {
3471 if (total_size < 2)
3472 {
3473 return 0;
3474 }
3475
3476 size_type count = 0;
3477
3478 for(const_iterator current = ++const_iterator(begin_iterator), previous = begin_iterator; current != end_iterator;)
3479 {
3480 if (compare(*current, *previous))
3481 {
3482 const size_type original_count = ++count;
3483 const_iterator last(++const_iterator(current));
3484
3485 while(last != end_iterator && compare(*last, *previous))
3486 {
3487 ++last;
3488 ++count;
3489 }
3490
3491 previous = current;
3492
3493 if (count != original_count)
3494 {
3495 current = erase(current, last); // optimised range-erase
3496 }
3497 else
3498 {
3499 current = erase(current);
3500 }
3501 }
3502 else
3503 {
3504 previous = current++;
3505 }
3506 }
3507
3508 return count;
3509 }
3510
3511
3512
3513 size_type unique()
3514 {
3515 return unique(std::equal_to<element_type>());
3516 }
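// Annotation (not part of the original source): as with std::list::unique, only consecutive
// matching elements are erased, so call sort() first for full deduplication; the return
// value is the number of elements removed. Sketch:
//   plf::hive<int> h = {1, 2, 2, 3, 2};
//   h.sort();                        // 1 2 2 2 3
//   const auto removed = h.unique(); // now 1 2 3, removed == 2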
3517
3518
3519
3520 void swap(hive &source) noexcept(std::allocator_traits<allocator_type>::propagate_on_container_swap::value || std::allocator_traits<allocator_type>::is_always_equal::value)
3521 {
3522 assert(&source != this);
3523
3524 if constexpr (std::allocator_traits<allocator_type>::is_always_equal::value && std::is_trivial<group_pointer_type>::value && std::is_trivial<aligned_pointer_type>::value && std::is_trivial<skipfield_pointer_type>::value) // if all pointer types are trivial we can just copy using memcpy - avoids constructors/destructors etc and is faster
3525 {
3526 char temp[sizeof(hive)];
3527 std::memcpy(&temp, static_cast<void *>(this), sizeof(hive));
3528 std::memcpy(static_cast<void *>(this), static_cast<void *>(&source), sizeof(hive));
3529 std::memcpy(static_cast<void *>(&source), &temp, sizeof(hive));
3530 }
3531 else if constexpr (std::is_move_assignable<group_pointer_type>::value && std::is_move_assignable<aligned_pointer_type>::value && std::is_move_assignable<skipfield_pointer_type>::value && std::is_move_constructible<group_pointer_type>::value && std::is_move_constructible<aligned_pointer_type>::value && std::is_move_constructible<skipfield_pointer_type>::value)
3532 {
3533 hive temp(std::move(source));
3534 source = std::move(*this);
3535 *this = std::move(temp);
3536 }
3537 else
3538 {
3539 const iterator swap_end_iterator = end_iterator, swap_begin_iterator = begin_iterator;
3540 const group_pointer_type swap_erasure_groups_head = erasure_groups_head, swap_unused_groups_head = unused_groups_head;
3541 const size_type swap_total_size = total_size, swap_total_capacity = total_capacity;
3542 const skipfield_type swap_min_block_capacity = min_block_capacity, swap_max_block_capacity = max_block_capacity;
3543
3544 end_iterator = source.end_iterator;
3545 begin_iterator = source.begin_iterator;
3546 erasure_groups_head = source.erasure_groups_head;
3547 unused_groups_head = source.unused_groups_head;
3548 total_size = source.total_size;
3549 total_capacity = source.total_capacity;
3550 min_block_capacity = source.min_block_capacity;
3551 max_block_capacity = source.max_block_capacity;
3552
3553 source.end_iterator = swap_end_iterator;
3554 source.begin_iterator = swap_begin_iterator;
3555 source.erasure_groups_head = swap_erasure_groups_head;
3556 source.unused_groups_head = swap_unused_groups_head;
3557 source.total_size = swap_total_size;
3558 source.total_capacity = swap_total_capacity;
3559 source.min_block_capacity = swap_min_block_capacity;
3560 source.max_block_capacity = swap_max_block_capacity;
3561
3562 if constexpr (std::allocator_traits<allocator_type>::propagate_on_container_swap::value && !std::allocator_traits<allocator_type>::is_always_equal::value)
3563 {
3564 allocator_type swap_allocator = std::move(static_cast<allocator_type &>(source));
3565 static_cast<allocator_type &>(source) = std::move(static_cast<allocator_type &>(*this));
3566 static_cast<allocator_type &>(*this) = std::move(swap_allocator);
3567
3568 // Reconstruct rebinds for swapped allocators:
3569 group_allocator = group_allocator_type(*this);
3570 aligned_struct_allocator = aligned_struct_allocator_type(*this);
3571 skipfield_allocator = skipfield_allocator_type(*this);
3572 tuple_allocator = tuple_allocator_type(*this);
3573 source.group_allocator = group_allocator_type(source);
3574 source.aligned_struct_allocator = aligned_struct_allocator_type(source);
3575 source.skipfield_allocator = skipfield_allocator_type(source);
3576 source.tuple_allocator = tuple_allocator_type(source);
3577 } // else: undefined behaviour, as per standard
3578 }
3579 }
3580
3581
3582
3583 #ifdef PLF_BENCH_H // used for benchmarking only
3584 size_type memory() const noexcept
3585 {
3586 size_type memory_use = sizeof(*this); // sizeof hive basic structure
3587 end_iterator.group_pointer->next_group = unused_groups_head; // temporarily link the main groups and unused groups (reserved groups) in order to only have one loop below instead of several
3588
3589 for(group_pointer_type current = begin_iterator.group_pointer; current != nullptr; current = current->next_group)
3590 {
3591 memory_use += sizeof(group) + (get_aligned_block_capacity(current->capacity) * sizeof(aligned_allocation_struct)); // add memory block sizes and the size of the group structs themselves. The original calculation, including divisor, is necessary in order to correctly round up the number of allocations
3592 }
3593
3594 end_iterator.group_pointer->next_group = nullptr; // unlink main groups and unused groups
3595 return memory_use;
3596 }
3597 #endif
3598
3599
3600
3601
3602 // Iterators:
3603 template <bool is_const>
3604 class hive_iterator
3605 {
3606 private:
3607 typedef typename hive::group_pointer_type group_pointer_type;
3608 typedef typename hive::aligned_pointer_type aligned_pointer_type;
3609 typedef typename hive::skipfield_pointer_type skipfield_pointer_type;
3610
3611 group_pointer_type group_pointer;
3612 aligned_pointer_type element_pointer;
3613 skipfield_pointer_type skipfield_pointer;
3614
3615 public:
3616 typedef void hive_iterator_tag; // reconstructed: a member 'hive_iterator_tag' typename is required to satisfy plf::hive_iterator_concept; the exact original definition was not preserved in this listing
3617 typedef std::bidirectional_iterator_tag iterator_category;
3618 typedef std::bidirectional_iterator_tag iterator_concept;
3619 typedef typename hive::value_type value_type;
3620 typedef typename hive::difference_type difference_type;
3621 typedef typename hive::size_type size_type; // these three typedefs reconstructed from usage within the iterator
3622 typedef typename std::conditional_t<is_const, typename hive::const_pointer, typename hive::pointer> pointer;
3623 typedef typename std::conditional_t<is_const, typename hive::const_reference, typename hive::reference> reference;
3624
3625 friend class hive;
3626 friend class hive_iterator<!is_const>; // reconstructed: required by the converting constructors/assignments between const/non-const iterators
3627 friend class hive_reverse_iterator<is_const>; // reconstructed: hive_reverse_iterator accesses this class's private members
3628
3629 template <hive_iterator_concept it_type, typename distance_type>
3630 friend void std::advance(it_type &it, const distance_type distance);
3631
3632 template <hive_iterator_concept it_type>
3633 friend it_type std::next(it_type it, const typename std::iterator_traits<it_type>::difference_type distance);
3634
3635 template <hive_iterator_concept it_type>
3636 friend it_type std::prev(it_type it, const typename std::iterator_traits<it_type>::difference_type distance);
3637
3638 template <hive_iterator_concept it_type>
3639 friend typename std::iterator_traits<it_type>::difference_type std::distance(const it_type first, const it_type last);
3640
3641
3642
3643 hive_iterator() noexcept:
3644 group_pointer(nullptr),
3645 element_pointer(nullptr),
3646 skipfield_pointer(nullptr)
3647 {}
3648
3649
3650
3651 hive_iterator (const hive_iterator &source) noexcept: // Note: Surprisingly, use of = default here and in other simple constructors results in slowdowns of ~10% in many benchmarks under GCC
3652 group_pointer(source.group_pointer),
3653 element_pointer(source.element_pointer),
3654 skipfield_pointer(source.skipfield_pointer)
3655 {}
3656
3657
3658
3659 template <bool is_const_it = is_const, class = std::enable_if_t<is_const_it> >
3660 hive_iterator(const hive_iterator<false> &source) noexcept:
3661 group_pointer(source.group_pointer),
3662 element_pointer(source.element_pointer),
3663 skipfield_pointer(source.skipfield_pointer)
3664 {}
3665
3666
3667
3668 hive_iterator(hive_iterator &&source) noexcept:
3669 group_pointer(std::move(source.group_pointer)),
3670 element_pointer(std::move(source.element_pointer)),
3671 skipfield_pointer(std::move(source.skipfield_pointer))
3672 {}
3673
3674
3675
3676 template <bool is_const_it = is_const, class = std::enable_if_t<is_const_it> >
3677 hive_iterator(hive_iterator<false> &&source) noexcept:
3678 group_pointer(std::move(source.group_pointer)),
3679 element_pointer(std::move(source.element_pointer)),
3680 skipfield_pointer(std::move(source.skipfield_pointer))
3681 {}
3682
3683
3684
3685 hive_iterator & operator = (const hive_iterator &source) noexcept
3686 {
3687 group_pointer = source.group_pointer;
3688 element_pointer = source.element_pointer;
3689 skipfield_pointer = source.skipfield_pointer;
3690 return *this;
3691 }
3692
3693
3694
3695 template <bool is_const_it = is_const, class = std::enable_if_t<is_const_it> >
3696 hive_iterator & operator = (const hive_iterator<false> &source) noexcept
3697 {
3698 group_pointer = source.group_pointer;
3699 element_pointer = source.element_pointer;
3700 skipfield_pointer = source.skipfield_pointer;
3701 return *this;
3702 }
3703
3704
3705
3706 hive_iterator & operator = (hive_iterator &&source) noexcept
3707 {
3708 assert(&source != this);
3709 group_pointer = std::move(source.group_pointer);
3710 element_pointer = std::move(source.element_pointer);
3711 skipfield_pointer = std::move(source.skipfield_pointer);
3712 return *this;
3713 }
3714
3715
3716
3717 template <bool is_const_it = is_const, class = std::enable_if_t<is_const_it> >
3718 hive_iterator & operator = (hive_iterator<false> &&source) noexcept
3719 {
3720 group_pointer = std::move(source.group_pointer);
3721 element_pointer = std::move(source.element_pointer);
3722 skipfield_pointer = std::move(source.skipfield_pointer);
3723 return *this;
3724 }
3725
3726
3727
3728 bool operator == (const hive_iterator &rh) const noexcept
3729 {
3730 return (element_pointer == rh.element_pointer);
3731 }
3732
3733
3734
3735 bool operator == (const hive_iterator<!is_const> &rh) const noexcept
3736 {
3737 return (element_pointer == rh.element_pointer);
3738 }
3739
3740
3741
3742 bool operator != (const hive_iterator &rh) const noexcept
3743 {
3744 return (element_pointer != rh.element_pointer);
3745 }
3746
3747
3748
3749 bool operator != (const hive_iterator<!is_const> &rh) const noexcept
3750 {
3751 return (element_pointer != rh.element_pointer);
3752 }
3753
3754
3755
3756 reference operator * () const // may cause exception with uninitialized iterator
3757 {
3758 return *pointer_cast<pointer>(element_pointer);
3759 }
3760
3761
3762
3763 pointer operator -> () const
3764 {
3765 return pointer_cast<pointer>(element_pointer);
3766 }
3767
3768
3769
3770 hive_iterator & operator ++ ()
3771 {
3772 assert(group_pointer != nullptr); // covers uninitialised hive_iterator
3773 skipfield_type skip = *(++skipfield_pointer);
3774
3775 if ((element_pointer += static_cast<size_type>(skip) + 1u) == pointer_cast<aligned_pointer_type>(group_pointer->skipfield) && group_pointer->next_group != nullptr) // ie. beyond end of current memory block. Second condition allows iterator to reach end(), which may be 1 past end of block, if block has been fully used and another block is not allocated
3776 {
3777 group_pointer = group_pointer->next_group;
3778 const aligned_pointer_type elements = group_pointer->elements;
3779 const skipfield_pointer_type skipfield = group_pointer->skipfield;
3780 skip = *skipfield;
3781 element_pointer = elements + skip;
3782 skipfield_pointer = skipfield;
3783 }
3784
3785 skipfield_pointer += skip;
3786 return *this;
3787 }
3788
3789
3790
3791 hive_iterator operator ++ (int)
3792 {
3793 const hive_iterator copy(*this);
3794 ++*this;
3795 return copy;
3796 }
3797
3798
3799
3800 hive_iterator & operator -- ()
3801 {
3802 assert(group_pointer != nullptr);
3803
3804 if (element_pointer != group_pointer->elements) // ie. not already at beginning of group
3805 {
3806 const skipfield_type skip = *(--skipfield_pointer);
3807 skipfield_pointer -= skip;
3808
3809 if ((element_pointer -= static_cast<size_type>(skip) + 1u) != group_pointer->elements - 1) // ie. iterator was not already at beginning of hive (with some previous consecutive deleted elements), and the skipfield does not take us into the previous group
3810 {
3811 return *this;
3812 }
3813 }
3814
3815 group_pointer = group_pointer->previous_group;
3816 const skipfield_pointer_type skipfield = group_pointer->skipfield + group_pointer->capacity - 1;
3817 const skipfield_type skip = *skipfield;
3818 element_pointer = (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - 1) - skip;
3819 skipfield_pointer = skipfield - skip;
3820 return *this;
3821 }
3822
3823
3824
3825 hive_iterator operator -- (int)
3826 {
3827 const hive_iterator copy(*this);
3828 --*this;
3829 return copy;
3830 }
3831
3832
3833
3834 // Less-than etc operators retained as GCC codegen synthesis from <=> is slower and bulkier for same operations:
3835 template <bool is_const_it>
3836 bool operator > (const hive_iterator<is_const_it> &rh) const noexcept
3837 {
3838 return ((group_pointer == rh.group_pointer) & std::greater()(element_pointer, rh.element_pointer)) ||
3839 (group_pointer != rh.group_pointer && group_pointer->group_number > rh.group_pointer->group_number);
3840 }
3841
3842
3843
3844 template <bool is_const_it>
3845 bool operator < (const hive_iterator<is_const_it> &rh) const noexcept
3846 {
3847 return rh > *this;
3848 }
3849
3850
3851
3852 template <bool is_const_it>
3853 bool operator >= (const hive_iterator<is_const_it> &rh) const noexcept
3854 {
3855 return !(rh > *this);
3856 }
3857
3858
3859
3860 template <bool is_const_it>
3861 bool operator <= (const hive_iterator<is_const_it> &rh) const noexcept
3862 {
3863 return !(*this > rh);
3864 }
3865
3866
3867
3868 template <bool is_const_it>
3869 std::strong_ordering operator <=> (const hive_iterator<is_const_it> &rh) const noexcept
3870 {
3871 return (element_pointer == rh.element_pointer) ? std::strong_ordering::equal : ((*this > rh) ? std::strong_ordering::greater : std::strong_ordering::less);
3872 }
3873
3874
3875
3876 private:
3877 // Used by cend(), erase() etc:
3878 hive_iterator(const group_pointer_type group_p, const aligned_pointer_type element_p, const skipfield_pointer_type skipfield_p) noexcept:
3879 group_pointer(group_p),
3880 element_pointer(element_p),
3881 skipfield_pointer(skipfield_p)
3882 {}
3883
3884
3885
3886 // Advance implementation:
3887
3888 void advance(difference_type distance) // Cannot be noexcept due to the possibility of an uninitialized iterator
3889 {
3890 assert(group_pointer != nullptr); // covers uninitialized hive_iterator && empty group
3891
3892 // Now, run code based on the nature of the distance type - negative, positive or zero:
3893 if (distance > 0) // ie. +=
3894 {
3895 // Code explanation:
3896 // For the initial state of the iterator, we don't know which elements have been erased before that element in that group.
3897 // So for the first group, we use the following logic:
3898 // 1. If no elements have been erased in the group, we do simple pointer addition to progress, either to within the group (if the distance is small enough) or the end of the group and subtract from distance accordingly.
3899 // 2. If any of the first group's elements have been erased, we manually iterate, as we don't know whether the erased elements occur before or after the initial iterator position, and we subtract 1 from the distance amount each time we iterate. Iteration continues until either distance becomes zero, or we reach the end of the group.
3900
3901 // For all subsequent groups, we follow this logic:
3902 // 1. If distance is larger than the total number of non-erased elements in a group, we skip that group and subtract the number of elements in that group from distance.
3903 // 2. If distance is smaller than the total number of non-erased elements in a group, then:
3904 // a. If there are no erased elements in the group we simply add distance to group->elements to find the new location for the iterator.
3905 // b. If there are erased elements in the group, we manually iterate and subtract 1 from distance on each iteration, until the new iterator location is found ie. distance = 0.
3906
3907 // Note: incrementing element_pointer is deferred until necessary, to avoid needless calculations.
3908
3909 if (group_pointer->next_group == nullptr && element_pointer == pointer_cast<aligned_pointer_type>(group_pointer->skipfield)) // Check if we're already beyond back of final block
3910 {
3911 return;
3912 }
3913
3914
3915 // Special case for initial element pointer and initial group (we don't know how far into the group the element pointer is)
3916 if (element_pointer != group_pointer->elements + *(group_pointer->skipfield)) // ie. != first non-erased element in group
3917 {
3918 const difference_type distance_from_end = pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - element_pointer;
3919
3920 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // ie. if there are no erasures in the group
3921 {
3922 if (distance < distance_from_end)
3923 {
3924 element_pointer += distance;
3925 skipfield_pointer += distance;
3926 return;
3927 }
3928 else if (group_pointer->next_group == nullptr) // either we've reached end() or gone beyond it, so bound to back of block
3929 {
3930 element_pointer += distance_from_end;
3931 skipfield_pointer += distance_from_end;
3932 return;
3933 }
3934 else
3935 {
3936 distance -= distance_from_end;
3937 }
3938 }
3939 else
3940 {
3941 const skipfield_pointer_type endpoint = skipfield_pointer + distance_from_end;
3942
3943 while(true)
3944 {
3945 skipfield_pointer += *++skipfield_pointer;
3946 --distance;
3947
3948 if (skipfield_pointer == endpoint)
3949 {
3950 break;
3951 }
3952 else if (distance == 0)
3953 {
3954 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
3955 return;
3956 }
3957 }
3958
3959 if (group_pointer->next_group == nullptr) // either we've reached end() or gone beyond it, so bound to end of block
3960 {
3961 element_pointer = pointer_cast<aligned_pointer_type>(group_pointer->skipfield);
3962 return;
3963 }
3964 }
3965
3966 group_pointer = group_pointer->next_group;
3967
3968 if (distance == 0)
3969 {
3970 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
3971 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
3972 return;
3973 }
3974 }
3975
3976
3977 // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator is assumed to be the first non-erased element in the current group:
3978 while (static_cast<difference_type>(group_pointer->size) <= distance)
3979 {
3980 if (group_pointer->next_group == nullptr) // either we've reached end() or gone beyond it, so bound to end of block
3981 {
3982 element_pointer = pointer_cast<aligned_pointer_type>(group_pointer->skipfield);
3983 skipfield_pointer = group_pointer->skipfield + group_pointer->capacity;
3984 return;
3985 }
3986 else if ((distance -= group_pointer->size) == 0)
3987 {
3988 group_pointer = group_pointer->next_group;
3989 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
3990 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
3991 return;
3992 }
3993 else
3994 {
3995 group_pointer = group_pointer->next_group;
3996 }
3997 }
3998
3999
4000 // Final group (if not already reached):
4001 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // No erasures in this group, use straight pointer addition
4002 {
4003 element_pointer = group_pointer->elements + distance;
4004 skipfield_pointer = group_pointer->skipfield + distance;
4005 return;
4006 }
4007 else // We already know size > distance due to the intermediary group checks above - safe to ignore endpoint check condition while incrementing here:
4008 {
4009 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4010
4011 do
4012 {
4013 skipfield_pointer += *++skipfield_pointer;
4014 } while(--distance != 0);
4015
4016 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4017 return;
4018 }
4019
4020 return;
4021 }
4022 else if (distance < 0) // for negative change
4023 {
4024 // Code logic is very similar to += above
4025 if(group_pointer->previous_group == nullptr && element_pointer == group_pointer->elements + *(group_pointer->skipfield)) // check if we're already at begin()
4026 {
4027 return;
4028 }
4029
4030 distance = -distance;
4031
4032 // Special case for initial element pointer and initial group (we don't know how far into the group the element pointer is)
4033 if (element_pointer != pointer_cast<aligned_pointer_type>(group_pointer->skipfield)) // not currently at the back of a block
4034 {
4035 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // ie. no prior erasures have occurred in this group
4036 {
4037 const difference_type distance_from_beginning = static_cast<difference_type>(element_pointer - group_pointer->elements);
4038
4039 if (distance <= distance_from_beginning)
4040 {
4041 element_pointer -= distance;
4042 skipfield_pointer -= distance;
4043 return;
4044 }
4045 else if (group_pointer->previous_group == nullptr) // ie. we've gone before begin(), so bound to begin()
4046 {
4047 element_pointer = group_pointer->elements;
4048 skipfield_pointer = group_pointer->skipfield;
4049 return;
4050 }
4051 else
4052 {
4053 distance -= distance_from_beginning;
4054 }
4055 }
4056 else
4057 {
4058 const skipfield_pointer_type beginning_point = group_pointer->skipfield + *(group_pointer->skipfield);
4059
4060 while(skipfield_pointer != beginning_point)
4061 {
4062 skipfield_pointer -= *--skipfield_pointer;
4063
4064 if (--distance == 0)
4065 {
4066 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4067 return;
4068 }
4069 }
4070
4071 if (group_pointer->previous_group == nullptr)
4072 {
4073 element_pointer = group_pointer->elements + *(group_pointer->skipfield); // This is first group, so bound to begin() (just in case final decrement took us before begin())
4074 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4075 return;
4076 }
4077 }
4078
4079 group_pointer = group_pointer->previous_group;
4080 }
4081
4082
4083 // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator is assumed to be either the first non-erased element in the next group over, or end():
4084 while(static_cast<difference_type>(group_pointer->size) < distance)
4085 {
4086 if (group_pointer->previous_group == nullptr) // we've gone beyond begin(), so bound to it
4087 {
4088 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
4089 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4090 return;
4091 }
4092
4093 distance -= group_pointer->size;
4094 group_pointer = group_pointer->previous_group;
4095 }
4096
4097
4098 // Final group (if not already reached):
4099 if (static_cast<difference_type>(group_pointer->size) == distance) // go to front of group
4100 {
4101 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
4102 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4103 return;
4104 }
4105 else if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // ie. no erased elements in this group
4106 {
4107 element_pointer = pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - distance;
4108 skipfield_pointer = (group_pointer->skipfield + group_pointer->size) - distance;
4109 return;
4110 }
4111 else // ie. no more groups to traverse but there are erased elements in this group
4112 {
4113 skipfield_pointer = group_pointer->skipfield + (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - group_pointer->elements);
4114
4115 do
4116 {
4117 skipfield_pointer -= *--skipfield_pointer;
4118 } while(--distance != 0);
4119
4120 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4121 return;
4122 }
4123 }
4124
4125 // Only distance == 0 reaches here
4126 }
4127
4128
4129
4130 // distance implementation:
4131
4132 difference_type distance(const hive_iterator &last) const
4133 {
4134 // Code logic:
4135 // If iterators are the same, return 0
4136 // Otherwise, find which iterator is later in hive, copy that to iterator2. Copy the lower to iterator1.
4137 // If they are not pointing to elements in the same group, process the intermediate groups and add distances,
4138 // skipping manual incrementation in all but the initial and final groups.
4139 // In the initial and final groups, manual incrementation must be used to calculate distance, if there have been prior erasures in those groups.
4140 // If there are no prior erasures in either of those groups, we can use pointer arithmetic to calculate the distances for those groups.
4141
4142 assert(!(group_pointer == nullptr) && !(last.group_pointer == nullptr)); // Check that they are both initialized
4143
4144 if (last.element_pointer == element_pointer)
4145 {
4146 return 0;
4147 }
4148
4149 difference_type distance = 0;
4150 hive_iterator iterator1 = *this, iterator2 = last;
4151 const bool swap = iterator1 > iterator2;
4152
4153 if (swap)
4154 {
4155 iterator1 = last;
4156 iterator2 = *this;
4157 }
4158
4159 if (iterator1.group_pointer != iterator2.group_pointer) // if not in same group, process intermediate groups
4160 {
4161 // Process initial group:
4162 if (iterator1.group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // If no prior erasures have occurred in this group we can do simple addition
4163 {
4164 distance += static_cast<difference_type>(pointer_cast<aligned_pointer_type>(iterator1.group_pointer->skipfield) - iterator1.element_pointer);
4165 }
4166 else if (iterator1.element_pointer == iterator1.group_pointer->elements + *(iterator1.group_pointer->skipfield)) // ie. element is at start of group - rare case
4167 {
4168 distance += static_cast<difference_type>(iterator1.group_pointer->size);
4169 }
4170 else // Manually iterate to find distance to end of group:
4171 {
4172 const skipfield_pointer_type endpoint = iterator1.skipfield_pointer + (pointer_cast<aligned_pointer_type>(iterator1.group_pointer->skipfield) - iterator1.element_pointer);
4173
4174 while (iterator1.skipfield_pointer != endpoint)
4175 {
4176 iterator1.skipfield_pointer += *++iterator1.skipfield_pointer;
4177 ++distance;
4178 }
4179 }
4180
4181 // Process all other intermediate groups:
4182 iterator1.group_pointer = iterator1.group_pointer->next_group;
4183
4184 while (iterator1.group_pointer != iterator2.group_pointer)
4185 {
4186 distance += static_cast<difference_type>(iterator1.group_pointer->size);
4187 iterator1.group_pointer = iterator1.group_pointer->next_group;
4188 }
4189
4190 iterator1.skipfield_pointer = iterator1.group_pointer->skipfield + *(iterator1.group_pointer->skipfield);
4191 }
4192
4193
4194 if (iterator2.group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // ie. no erasures in this group, direct subtraction is possible
4195 {
4196 distance += iterator2.skipfield_pointer - iterator1.skipfield_pointer;
4197 }
4198 else if (iterator1.element_pointer == iterator2.group_pointer->elements + *(iterator2.group_pointer->skipfield) && iterator2.element_pointer + 1 + *(iterator2.skipfield_pointer + 1) == pointer_cast<aligned_pointer_type>(iterator2.group_pointer->skipfield)) // ie. if iterator1 is at beginning of block (have to check this in case first and last are in the same block to begin with) and iterator2 is last element in the block
4199 {
4200 distance += static_cast<difference_type>(iterator2.group_pointer->size) - 1;
4201 }
4202 else
4203 {
4204 while (iterator1.skipfield_pointer != iterator2.skipfield_pointer)
4205 {
4206 iterator1.skipfield_pointer += *++iterator1.skipfield_pointer;
4207 ++distance;
4208 }
4209 }
4210
4211
4212 if (swap)
4213 {
4214 distance = -distance;
4215 }
4216
4217 return distance;
4218 }
4219 }; // hive_iterator
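// Annotation (not part of the original source): the private advance()/distance() members
// above implement the std::advance/next/prev/distance overloads declared as friends above,
// skipping whole blocks where possible instead of stepping over erased slots one at a time.
// Sketch, assuming h.size() >= 10:
//   auto it = h.begin();
//   std::advance(it, 10);                       // skipfield-aware, bounded at end()
//   assert(std::distance(h.begin(), it) == 10);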
4220
4221
4222
4223
4224
4225 // Reverse iterators:
4226
4227 template <bool is_const_r>
4228 class hive_reverse_iterator
4229 {
4230 private:
4231 typedef typename hive::group_pointer_type group_pointer_type;
4232 typedef typename hive::aligned_pointer_type aligned_pointer_type;
4233 typedef typename hive::skipfield_pointer_type skipfield_pointer_type;
4234
4235 protected:
4236 hive_iterator<is_const_r> current;
4237
4238 public:
4239 typedef void hive_iterator_tag; // reconstructed: member typename needed to satisfy plf::hive_iterator_concept; exact original form unverified
4240 typedef std::bidirectional_iterator_tag iterator_category;
4241 typedef std::bidirectional_iterator_tag iterator_concept;
4242 typedef typename hive::value_type value_type;
4243 typedef typename hive::difference_type difference_type;
4244 typedef typename hive::size_type size_type; // these three typedefs reconstructed by analogy with hive_iterator above
4245 typedef typename std::conditional_t<is_const_r, typename hive::const_pointer, typename hive::pointer> pointer;
4246 typedef typename std::conditional_t<is_const_r, typename hive::const_reference, typename hive::reference> reference;
4247
4248 friend class hive;
4249
4250 template <hive_iterator_concept it_type, typename distance_type>
4251 friend void std::advance(it_type &it, const distance_type distance);
4252
4253 template <hive_iterator_concept it_type>
4254 friend it_type std::next(it_type it, const typename std::iterator_traits<it_type>::difference_type distance);
4255
4256 template <hive_iterator_concept it_type>
4257 friend it_type std::prev(it_type it, const typename std::iterator_traits<it_type>::difference_type distance);
4258
4259 template <hive_iterator_concept it_type>
4260 friend typename std::iterator_traits<it_type>::difference_type std::distance(const it_type first, const it_type last);
4261
4262
4263
4264 hive_reverse_iterator() noexcept
4265 {}
4266
4267
4268 hive_reverse_iterator (const hive_reverse_iterator &source) noexcept:
4269 current(source.current)
4270 {}
4271
4272
4273 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4274 hive_reverse_iterator (const hive_reverse_iterator<false> &source) noexcept:
4275 current(source.current)
4276 {}
4277
4278
4279 hive_reverse_iterator (const hive_iterator<is_const_r> &source) noexcept:
4280 current(source)
4281 {
4282 ++(*this);
4283 }
4284
4285
4286 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4287 hive_reverse_iterator (const hive_iterator<false> &source) noexcept:
4288 current(source)
4289 {
4290 ++(*this);
4291 }
4292
4293
4294 hive_reverse_iterator (hive_reverse_iterator &&source) noexcept:
4295 current(std::move(source.current))
4296 {}
4297
4298
4299 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4300 hive_reverse_iterator (hive_reverse_iterator<false> &&source) noexcept:
4301 current(std::move(source.current))
4302 {}
4303
4304
4305 hive_reverse_iterator & operator = (const hive_iterator<is_const_r> &source) noexcept
4306 {
4307 current = source;
4308 ++(*this);
4309 return *this;
4310 }
4311
4312
4313 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4314 hive_reverse_iterator & operator = (const hive_iterator<false> &source) noexcept
4315 {
4316 current = source;
4317 ++(*this);
4318 return *this;
4319 }
4320
4321
4322 hive_reverse_iterator & operator = (const hive_reverse_iterator &source) noexcept
4323 {
4324 current = source.current;
4325 return *this;
4326 }
4327
4328
4329 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4330 hive_reverse_iterator & operator = (const hive_reverse_iterator<false> &source) noexcept
4331 {
4332 current = source.current;
4333 return *this;
4334 }
4335
4336
4337 hive_reverse_iterator & operator = (hive_reverse_iterator &&source) noexcept
4338 {
4339 assert(&source != this);
4340 current = std::move(source.current);
4341 return *this;
4342 }
4343
4344
4345 template <bool is_const_rit = is_const_r, class = std::enable_if_t<is_const_rit> >
4346 hive_reverse_iterator & operator = (hive_reverse_iterator<false> &&source) noexcept
4347 {
4348 assert(&source != this);
4349 current = std::move(source.current);
4350 return *this;
4351 }
4352
4353
4354
4355 bool operator == (const hive_reverse_iterator &rh) const noexcept
4356 {
4357 return (current == rh.current);
4358 }
4359
4360
4361
4362 bool operator == (const hive_reverse_iterator<!is_const_r> &rh) const noexcept
4363 {
4364 return (current == rh.current);
4365 }
4366
4367
4368
4369 bool operator != (const hive_reverse_iterator &rh) const noexcept
4370 {
4371 return (current != rh.current);
4372 }
4373
4374
4375
4376 bool operator != (const hive_reverse_iterator<!is_const_r> &rh) const noexcept
4377 {
4378 return (current != rh.current);
4379 }
4380
4381
4382
4383 reference operator * () const noexcept
4384 {
4385 return *pointer_cast<pointer>(current.element_pointer);
4386 }
4387
4388
4389
4390 pointer operator -> () const noexcept
4391 {
4392 return pointer_cast<pointer>(current.element_pointer);
4393 }
4394
4395
4396
4397 // In this case we have to redefine the algorithm, rather than using the internal iterator's -- operator, in order for the reverse_iterator to be allowed to reach rend() ie. begin_iterator - 1
4398 hive_reverse_iterator & operator ++ ()
4399 {
4400 group_pointer_type &group_pointer = current.group_pointer;
4401 aligned_pointer_type &element_pointer = current.element_pointer;
4402 skipfield_pointer_type &skipfield_pointer = current.skipfield_pointer;
4403
4404 assert(group_pointer != nullptr);
4405
4406 if (element_pointer != group_pointer->elements) // ie. not already at beginning of group
4407 {
4408 element_pointer -= static_cast<size_type>(*(--skipfield_pointer)) + 1u;
4409 skipfield_pointer -= *skipfield_pointer;
4410
4411 if (!(element_pointer == group_pointer->elements - 1 && group_pointer->previous_group == nullptr)) // ie. iterator is not == rend()
4412 {
4413 return *this;
4414 }
4415 }
4416
4417 if (group_pointer->previous_group != nullptr) // ie. not first group in hive
4418 {
4419 group_pointer = group_pointer->previous_group;
4420 skipfield_pointer = group_pointer->skipfield + group_pointer->capacity - 1;
4421 element_pointer = (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - 1) - *skipfield_pointer;
4422 skipfield_pointer -= *skipfield_pointer;
4423 }
4424 else // necessary so that reverse_iterator can end up == rend(), if we were already at first element in hive
4425 {
4426 --element_pointer;
4427 --skipfield_pointer;
4428 }
4429
4430 return *this;
4431 }
4432
4433
4434
4435 hive_reverse_iterator operator ++ (int)
4436 {
4437 const hive_reverse_iterator copy(*this);
4438 ++*this;
4439 return copy;
4440 }
4441
4442
4443
4444 hive_reverse_iterator & operator -- ()
4445 {
4446 ++current;
4447 return *this;
4448 }
4449
4450
4451
4452 hive_reverse_iterator operator -- (int)
4453 {
4454 const hive_reverse_iterator copy(*this);
4455 --*this;
4456 return copy;
4457 }
4458
4459
4460
4461 hive_iterator<is_const_r> base() const noexcept
4462 {
4463 return (current.group_pointer != nullptr) ? ++(hive_iterator<is_const_r>(current)) : hive_iterator<is_const_r>(nullptr, nullptr, nullptr);
4464 }
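// Annotation (not part of the original source): as with std::reverse_iterator, base() returns
// the forward iterator one position past the element this reverse iterator refers to, e.g.
// for a non-empty hive h: h.rbegin().base() == h.end().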
4465
4466
4467
4468 template <bool is_const_it>
4469 bool operator > (const hive_reverse_iterator<is_const_it> &rh) const noexcept
4470 {
4471 return (rh.current > current);
4472 }
4473
4474
4475
4476 template <bool is_const_it>
4477 bool operator < (const hive_reverse_iterator<is_const_it> &rh) const noexcept
4478 {
4479 return (current > rh.current);
4480 }
4481
4482
4483
4484 template <bool is_const_it>
4485 bool operator >= (const hive_reverse_iterator<is_const_it> &rh) const noexcept
4486 {
4487 return !(current > rh.current);
4488 }
4489
4490
4491
4492 template <bool is_const_it>
4493 bool operator <= (const hive_reverse_iterator<is_const_it> &rh) const noexcept
4494 {
4495 return !(rh.current > current);
4496 }
4497
4498
4499
4500 template <bool is_const_it>
4501 std::strong_ordering operator <=> (const hive_reverse_iterator<is_const_it> &rh) const noexcept
4502 {
4503 return (rh.current <=> current);
4504 }
4505
4506
4507
4508 private:
4509 // Used by rend(), etc:
4510 hive_reverse_iterator(const group_pointer_type group_p, const aligned_pointer_type element_p, const skipfield_pointer_type skipfield_p) noexcept: current(group_p, element_p, skipfield_p) {}
4511
4512
4513
4514 // distance implementation:
4515
4516 difference_type distance(const hive_reverse_iterator &last) const
4517 {
4518 return last.current.distance(current);
4519 }
4520
4521
4522
4523 // Advance for reverse_iterator and const_reverse_iterator - this needs to be implemented slightly differently to forward-iterator's advance, as current needs to be able to reach rend() (ie. begin() - 1) and to be bounded by rbegin():
4524 void advance(difference_type distance)
4525 {
4526 group_pointer_type &group_pointer = current.group_pointer;
4527 aligned_pointer_type &element_pointer = current.element_pointer;
4528 skipfield_pointer_type &skipfield_pointer = current.skipfield_pointer;
4529
4530 assert(element_pointer != nullptr);
4531
4532 if (distance > 0)
4533 {
4534 if (group_pointer->previous_group == nullptr && element_pointer == group_pointer->elements - 1) // Check if we're already at rend()
4535 {
4536 return;
4537 }
4538
4539 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max())
4540 {
4541 const difference_type distance_from_beginning = element_pointer - group_pointer->elements;
4542
4543 if (distance <= distance_from_beginning)
4544 {
4545 element_pointer -= distance;
4546 skipfield_pointer -= distance;
4547 return;
4548 }
4549 else if (group_pointer->previous_group == nullptr) // Either we've reached rend() or gone beyond it, so bound to rend()
4550 {
4551 element_pointer = group_pointer->elements - 1;
4552 skipfield_pointer = group_pointer->skipfield - 1;
4553 return;
4554 }
4555 else
4556 {
4557 distance -= distance_from_beginning;
4558 }
4559 }
4560 else
4561 {
4562 const skipfield_pointer_type beginning_point = group_pointer->skipfield + *(group_pointer->skipfield);
4563
4564 while(skipfield_pointer != beginning_point)
4565 {
4566 skipfield_pointer -= *--skipfield_pointer;
4567
4568 if (--distance == 0)
4569 {
4570 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4571 return;
4572 }
4573 }
4574
4575 if (group_pointer->previous_group == nullptr)
4576 {
4577 element_pointer = group_pointer->elements - 1; // If we've reached rend(), bound to that
4578 skipfield_pointer = group_pointer->skipfield - 1;
4579 return;
4580 }
4581 }
4582
4583 group_pointer = group_pointer->previous_group;
4584
4585
4586 // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator is assumed to be the first non-erased element in the next group:
4587 while(static_cast<difference_type>(group_pointer->size) < distance)
4588 {
4589 if (group_pointer->previous_group == nullptr) // bound to rend()
4590 {
4591 element_pointer = group_pointer->elements - 1;
4592 skipfield_pointer = group_pointer->skipfield - 1;
4593 return;
4594 }
4595
4596 distance -= static_cast<difference_type>(group_pointer->size);
4597 group_pointer = group_pointer->previous_group;
4598 }
4599
4600
4601 // Final group (if not already reached)
4602 if (static_cast<difference_type>(group_pointer->size) == distance)
4603 {
4604 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
4605 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4606 return;
4607 }
4608 else if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max())
4609 {
4610 element_pointer = (group_pointer->elements + group_pointer->size) - distance;
4611 skipfield_pointer = (group_pointer->skipfield + group_pointer->size) - distance;
4612 return;
4613 }
4614 else
4615 {
4616 skipfield_pointer = group_pointer->skipfield + group_pointer->capacity;
4617
4618 do
4619 {
4620 skipfield_pointer -= *--skipfield_pointer;
4621 } while(--distance != 0);
4622
4623 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4624 return;
4625 }
4626 }
4627 else if (distance < 0)
4628 {
4629 if (group_pointer->next_group == nullptr && (element_pointer == (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - 1) - *(group_pointer->skipfield + (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - group_pointer->elements) - 1))) // Check if we're already at rbegin()
4630 {
4631 return;
4632 }
4633
4634 if (element_pointer != group_pointer->elements + *(group_pointer->skipfield)) // ie. != first non-erased element in group
4635 {
4636 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max()) // ie. if there are no erasures in the group
4637 {
4638 const difference_type distance_from_end = pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - element_pointer;
4639
4640 if (distance < distance_from_end)
4641 {
4642 element_pointer += distance;
4643 skipfield_pointer += distance;
4644 return;
4645 }
4646 else if (group_pointer->next_group == nullptr) // either we've reached end() or gone beyond it, so bound to back of block
4647 {
4648 element_pointer += distance_from_end - 1;
4649 skipfield_pointer += distance_from_end - 1;
4650 return;
4651 }
4652 else
4653 {
4654 distance -= distance_from_end;
4655 }
4656 }
4657 else
4658 {
4659 const skipfield_pointer_type endpoint = skipfield_pointer + (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - element_pointer);
4660
4661 while(true)
4662 {
4663 skipfield_pointer += *++skipfield_pointer;
4664 --distance;
4665
4666 if (skipfield_pointer == endpoint)
4667 {
4668 break;
4669 }
4670 else if (distance == 0)
4671 {
4672 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4673 return;
4674 }
4675 }
4676
4677 if (group_pointer->next_group == nullptr)
4678 {
4679 return;
4680 }
4681 }
4682
4683 group_pointer = group_pointer->next_group;
4684
4685 if (distance == 0)
4686 {
4687 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
4688 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4689 return;
4690 }
4691 }
4692
4693
4694 // Intermediary groups:
4695 while(static_cast<difference_type>(group_pointer->size) <= distance)
4696 {
4697 if (group_pointer->next_group == nullptr) // bound to last element slot in block
4698 {
4699 skipfield_pointer = group_pointer->skipfield + group_pointer->capacity - 1;
4700 element_pointer = (pointer_cast<aligned_pointer_type>(group_pointer->skipfield) - 1) - *skipfield_pointer;
4701 skipfield_pointer -= *skipfield_pointer;
4702 return;
4703 }
4704 else if ((distance -= group_pointer->size) == 0)
4705 {
4706 group_pointer = group_pointer->next_group;
4707 element_pointer = group_pointer->elements + *(group_pointer->skipfield);
4708 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4709 return;
4710 }
4711 else
4712 {
4713 group_pointer = group_pointer->next_group;
4714 }
4715 }
4716
4717
4718 // Final group (if not already reached):
4719 if (group_pointer->free_list_head == std::numeric_limits<skipfield_type>::max())
4720 {
4721 element_pointer = group_pointer->elements + distance;
4722 skipfield_pointer = group_pointer->skipfield + distance;
4723 return;
4724 }
4725 else // we already know size > distance from previous loop - so it's safe to ignore endpoint check condition while incrementing:
4726 {
4727 skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield);
4728
4729 do
4730 {
4731 skipfield_pointer += *++skipfield_pointer;
4732 } while(--distance != 0);
4733
4734 element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield);
4735 return;
4736 }
4737
4738 return;
4739 }
4740 }
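// Advance sketch (editor's addition): std::advance dispatches to the member
// above for hive reverse iterators; per the bounding comments in the code, an
// over-long positive distance stops at rend() rather than running off the hive:
//
// plf::hive<int> h = {1, 2, 3, 4, 5};
// plf::hive<int>::reverse_iterator rit = h.rbegin();
// std::advance(rit, 3); // rbegin() refers to 5, so rit now refers to 2
// assert(*rit == 2);
// std::advance(rit, 100); // bounded at rend()
// assert(rit == h.rend());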
4741
4742 }; // hive_reverse_iterator
4743
4744
4745}; // hive
4746
4747
4748
4749// Used by std::erase_if() overload below:
4750template<class element_type>
4752{
4753 const element_type value;
4754
4755 explicit hive_eq_to(const element_type store_value): /* may not be noexcept as the element may allocate and therefore potentially throw when copied */
4756 value(store_value)
4757 {}
4758
4759 bool operator() (const element_type compare_value) const noexcept
4760 {
4761 return value == compare_value;
4762 }
4763};
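// Functor sketch (editor's addition): hive_eq_to simply stores a value and
// tests elements against it, which is what lets the std::erase() overload
// below reuse the std::erase_if() machinery:
//
// plf::hive_eq_to<int> eq(5);
// assert(eq(5) && !eq(6));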
4764
4765
4766
4767} // plf namespace
4768
4769
4770
4771
4772namespace std
4773{
4774
4775 template <class element_type, class allocator_type>
4776 void swap (plf::hive<element_type, allocator_type> &a, plf::hive<element_type, allocator_type> &b) noexcept(std::allocator_traits<allocator_type>::propagate_on_container_swap::value || std::allocator_traits<allocator_type>::is_always_equal::value)
4777 {
4778 a.swap(b);
4779 }
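// Swap sketch (editor's addition): this forwards to the member swap, which
// exchanges the hives' internal block lists rather than copying elements:
//
// plf::hive<int> a = {1, 2}, b = {3};
// std::swap(a, b);
// assert(a.size() == 1 && b.size() == 2);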
4780
4781
4782
4783 template <class element_type, class allocator_type, class predicate_function>
4784 typename plf::hive<element_type, allocator_type>::size_type erase_if(plf::hive<element_type, allocator_type> &container, predicate_function predicate)
4785 {
4786 typedef typename plf::hive<element_type, allocator_type> hive;
4787 typedef typename hive::const_iterator const_iterator;
4788 typedef typename hive::size_type size_type;
4789 size_type count = 0;
4790
4791 for (const_iterator current = container.cbegin(); current != container.cend(); )
4792 {
4793 if (predicate(*current))
4794 {
4795 const size_type original_count = ++count;
4796 const_iterator last(++const_iterator(current));
4797 const const_iterator end = container.cend();
4798
4799 while(last != end && predicate(*last))
4800 {
4801 ++last;
4802 ++count;
4803 }
4804
4805 if (count != original_count)
4806 {
4807 current = container.erase(current, last); // Take advantage of optimized ranged overload
4808 }
4809 else
4810 {
4811 current = container.erase(current);
4812 }
4813 }
4814 else
4815 {
4816 ++current;
4817 }
4818 }
4819
4820 return count;
4821 }
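// erase_if sketch (editor's addition): runs of matching elements are removed
// via the optimized ranged erase overload where possible, and the return value
// is the number of elements erased:
//
// plf::hive<int> h = {1, 2, 3, 4, 5, 6};
// const auto removed = std::erase_if(h, [](const int i) { return (i % 2) == 0; });
// assert(removed == 3 && h.size() == 3);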
4822
4823
4824
4825 template <class element_type, class allocator_type>
4826 typename plf::hive<element_type, allocator_type>::size_type erase(plf::hive<element_type, allocator_type> &container, const element_type &value)
4827 {
4828 return erase_if(container, plf::hive_eq_to<element_type>(value));
4829 }
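// erase sketch (editor's addition): removes every element equal to the given
// value, using hive_eq_to as the predicate:
//
// plf::hive<int> h = {1, 2, 2, 3};
// assert(std::erase(h, 2) == 2 && h.size() == 2);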
4830
4831
4832
4833 // std::reverse_iterator specialization, primarily to allow use of hive with ranges and std::make_reverse_iterator:
4834 template <plf::hive_iterator_concept it_type>
4835 class reverse_iterator<it_type> : public it_type::reverse_type
4836 {
4837 public:
4838 typedef typename it_type::reverse_type rit;
4839 using rit::rit;
4840 };
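// Ranges sketch (editor's addition): because std::reverse_iterator of a hive
// iterator is specialized above to be hive's own reverse type, the following
// are expected to work:
//
// plf::hive<int> h = {1, 2, 3};
// auto rit = std::make_reverse_iterator(h.end()); // yields hive's reverse_iterator
// for (const int i : std::views::reverse(h)) { /* visits 3, 2, 1 */ }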
4841
4842} // namespace std
4843
4844
4845#undef PLF_EXCEPTIONS_SUPPORT
4846
4847#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
4848 #pragma warning ( pop )
4849#endif
4850
4851#endif // PLF_HIVE_H