1 /**************************************************************************
3 * Copyright 2013-2014 RAD Game Tools and Valve Software
4 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 **************************************************************************/
27 // File: vogl_vector.h
30 #include "vogl_core.h"
// Non-template core shared by all vector<T> instantiations: holds the raw
// grow-the-allocation logic so it isn't duplicated per element type.
34 struct elemental_vector
// Relocation callback: moves 'num' constructed objects from pSrc to pDst
// during a reallocation (used for types that are not bitwise movable).
40 typedef void (*object_mover)(void *pDst, void *pSrc, uint num);
// Grows the block to hold at least min_new_capacity elements of element_size bytes.
// grow_hint presumably requests geometric (doubling) growth; nofail reports
// failure via the return value instead of terminating — TODO confirm against the .cpp.
42 bool increase_capacity(uint min_new_capacity, bool grow_hint, uint element_size, object_mover pRelocate, bool nofail);
// Growable array container (vogl's std::vector replacement).
// Deriving from helpers::rel_ops<> synthesizes !=, >, <=, >= from the
// operator== and operator< defined below.
46 class vector : public helpers::rel_ops<vector<T> >
// Standard-library-style nested typedefs (const flavors shown here).
50 typedef const T *const_iterator;
53 typedef const T &const_reference;
55 typedef const T *const_pointer;
// Fill constructor: n copies of init.
64 inline vector(uint n, const T &init)
69 increase_capacity(n, false);
// Copy-constructs all n slots from init.
70 helpers::construct_array(m_p, n, init);
// Copy constructor: deep-copies other's elements.
74 inline vector(const vector &other)
79 increase_capacity(other.m_size, false);
81 m_size = other.m_size;
// Trivially copyable element types are bulk-copied with memcpy ...
83 if (type_is_bitwise_copyable())
84 memcpy(reinterpret_cast<void *>(m_p), reinterpret_cast<const void *>(other.m_p), m_size * sizeof(T));
// ... otherwise each element is copy-constructed in place.
88 const T *pSrc = other.m_p;
89 for (uint i = m_size; i > 0; i--)
90 helpers::construct(pDst++, *pSrc++);
// Sized constructor: size default-constructed elements.
94 inline explicit vector(uint size)
// Destructor body: destroy all live elements (block itself is freed elsewhere).
106 scalar_type<T>::destruct_array(m_p, m_size);
// Copy assignment: reuses the existing allocation when it is big enough,
// otherwise grows first; then copies other's elements across.
111 inline vector &operator=(const vector &other)
// Existing capacity is sufficient — no reallocation needed.
116 if (m_capacity >= other.m_size)
121 increase_capacity(other.m_size, false);
// Bulk memcpy for trivially copyable types, per-element copy-construct otherwise.
124 if (type_is_bitwise_copyable())
125 memcpy(reinterpret_cast<void *>(m_p), reinterpret_cast<const void *>(other.m_p), other.m_size * sizeof(T));
129 const T *pSrc = other.m_p;
130 for (uint i = other.m_size; i > 0; i--)
131 helpers::construct(pDst++, *pSrc++);
134 m_size = other.m_size;
// Iterator support: raw pointers into the contiguous block.
139 inline const T *begin() const
148 inline const T *end() const
// Size/capacity queries.
157 inline bool is_empty() const
161 inline uint size() const
165 inline uint size_in_bytes() const
167 return m_size * sizeof(T);
169 inline uint capacity() const
174 // operator[] will assert on out of range indices, but in final builds there is (and will never be) any range checking on this method.
175 inline const T &operator[](uint i) const
177 VOGL_ASSERT(i < m_size);
180 inline T &operator[](uint i)
182 VOGL_ASSERT(i < m_size);
186 // at() always includes range checking, even in final builds, unlike operator [].
187 // The first element is returned if the index is out of range.
188 inline const T &at(uint i) const
190 VOGL_ASSERT(i < m_size);
// Clamps out-of-range access to element 0 rather than invoking UB.
// NOTE(review): on an empty vector m_p[0] still dereferences an empty/null
// block — callers are expected to ensure the container is non-empty.
191 return (i >= m_size) ? m_p[0] : m_p[i];
// Mutable overload, same clamping behavior.
195 VOGL_ASSERT(i < m_size);
196 return (i >= m_size) ? m_p[0] : m_p[i];
// front()/back(): unchecked in final builds (assert-only), like operator[].
199 inline const T &front() const
210 inline const T &back() const
213 return m_p[m_size - 1];
218 return m_p[m_size - 1];
// Raw pointer access to the underlying block.
221 inline const T *get_ptr() const
230 inline const T *get_const_ptr() const
235 // clear() sets the container to empty, then frees the allocated block.
240 scalar_type<T>::destruct_array(m_p, m_size);
// Drops the elements/block without running destructors — only safe when the
// caller knows destruction is unnecessary (or ownership was transferred).
248 inline void clear_no_destruction()
// reserve() both grows and (unlike std::vector) shrinks capacity.
259 inline void reserve(uint new_capacity)
261 if (new_capacity > m_capacity)
262 increase_capacity(new_capacity, false);
263 else if (new_capacity < m_capacity)
265 // Must work around the lack of a "decrease_capacity()" method.
266 // This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
// Shrink path: build a right-sized temporary and (presumably) swap with it.
268 tmp.increase_capacity(math::maximum(m_size, new_capacity), false);
// Non-fatal reserve: returns false on allocation failure instead of dying.
274 inline bool try_reserve(uint new_capacity)
276 return increase_capacity(new_capacity, true, true);
279 // resize() will never shrink the array's capacity, only enlarge it.
280 // resize(0) empties the container, but does not free the allocated block.
281 // If grow_hint is true, the capacity will at least double on resizes (it will also double if you're only enlarging by 1 element, assuming you're push_back'ing).
282 inline void resize(uint new_size, bool grow_hint = false)
284 if (m_size != new_size)
// Shrinking: destroy the tail elements, keep the allocation.
286 if (new_size < m_size)
287 scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size)
290 if (new_size > m_capacity)
// The (new_size == m_size + 1) test auto-enables doubling for push_back-style growth.
291 increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint);
// Default-construct the newly exposed tail.
293 scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
// Non-fatal variant of resize(): reports allocation failure via return value.
300 inline bool try_resize(uint new_size, bool grow_hint = false)
302 if (m_size != new_size)
304 if (new_size < m_size)
305 scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
308 if (new_size > m_capacity)
310 if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
314 scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
323 // Ensures array is large enough for element at index to be accessed.
324 inline void ensure_element_is_valid(uint index)
327 resize(index + 1, true);
330 // If size >= capacity/2, reset() sets the container's size to 0 but doesn't free the allocated block (because the container may be similarly loaded in the future).
331 // Otherwise it blows away the allocated block. See http://www.codercorner.com/blog/?p=494
334 if (m_size >= (m_capacity >> 1))
// Appends i default-constructed elements and returns a pointer to the first
// new one (pointer is taken after the resize, so it survives reallocation).
340 inline T *enlarge(uint i)
342 uint cur_size = m_size;
343 resize(cur_size + i, true);
344 return get_ptr() + cur_size;
// Non-fatal enlarge: presumably returns NULL when try_resize fails — TODO confirm.
347 inline T *try_enlarge(uint i)
349 uint cur_size = m_size;
350 if (!try_resize(cur_size + i, true))
352 return get_ptr() + cur_size;
// Appends a copy of obj. The assert guards against pushing a reference to an
// element of this same vector (reallocation would invalidate it mid-copy).
355 inline void push_back(const T &obj)
357 VOGL_ASSERT(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
359 if (m_size >= m_capacity)
360 increase_capacity(m_size + 1, true);
362 scalar_type<T>::construct(m_p + m_size, obj);
// Non-fatal push_back: returns false on allocation failure.
366 inline bool try_push_back(const T &obj)
368 VOGL_ASSERT(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
370 if (m_size >= m_capacity)
372 if (!increase_capacity(m_size + 1, true, true))
376 scalar_type<T>::construct(m_p + m_size, obj);
// Takes obj by value, so it is safe to push an element of this same vector
// (the copy is made before any reallocation).
382 inline void push_back_value(T obj)
384 if (m_size >= m_capacity)
385 increase_capacity(m_size + 1, true);
387 scalar_type<T>::construct(m_p + m_size, obj);
// Destroys the last element; capacity is retained.
391 inline void pop_back()
398 scalar_type<T>::destruct(&m_p[m_size]);
// Inserts n elements from p at position index, shifting the tail up.
// NOTE(review): p must not alias this vector's storage — resize() below may
// reallocate and the subsequent copies read from p.
402 inline void insert(uint index, const T *p, uint n)
404 VOGL_ASSERT(index <= m_size);
408 const uint orig_size = m_size;
409 resize(m_size + n, true);
// Number of existing elements that must shift up by n slots.
411 const uint num_to_move = orig_size - index;
413 if (type_is_bitwise_copyable())
415 // This overwrites the destination object bits, but bitwise copyable means we don't need to worry about destruction.
416 memmove(reinterpret_cast<void *>(m_p + index + n), reinterpret_cast<const void *>(m_p + index), sizeof(T) * num_to_move);
// Non-trivial types: move elements one at a time, back to front, so the
// source of each move is not overwritten before it is read.
420 const T *pSrc = m_p + orig_size - 1;
421 T *pDst = const_cast<T *>(pSrc) + n;
423 for (uint i = 0; i < num_to_move; i++)
425 VOGL_ASSERT((pDst - m_p) < (int)m_size);
// Gap is open — now copy the new elements in.
430 T *pDst = m_p + index;
432 if (type_is_bitwise_copyable())
434 // This copies in the new bits, overwriting the existing objects, which is OK for copyable types that don't need destruction.
435 memcpy(reinterpret_cast<void *>(pDst), reinterpret_cast<const void *>(p), sizeof(T) * n);
439 for (uint i = 0; i < n; i++)
441 VOGL_ASSERT((pDst - m_p) < (int)m_size);
// Single-element insert; same self-aliasing guard as push_back().
447 inline void insert(uint index, const T &obj)
449 VOGL_ASSERT(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
450 insert(index, &obj, 1);
453 // push_front() isn't going to be very fast - it's only here for usability.
454 inline void push_front(const T &obj)
// Appends another vector's contents (self-append is disallowed by the assert).
459 vector &append(const vector &other)
461 VOGL_ASSERT(this != &other);
463 insert(m_size, &other[0], other.m_size);
// Appends n elements from a raw array.
467 vector &append(const T *p, uint n)
470 insert(m_size, p, n);
// Erases n elements starting at start, shifting the tail down to fill the hole.
474 inline void erase(uint start, uint n)
476 VOGL_ASSERT((start + n) <= m_size);
// Release-build guard: silently refuse out-of-range erases.
477 if ((start + n) > m_size)
483 const uint num_to_move = m_size - (start + n);
485 T *pDst = m_p + start;
487 const T *pSrc = m_p + start + n;
489 if (type_is_bitwise_copyable_or_movable())
491 // This test is overly cautious.
492 if ((!type_is_bitwise_copyable()) || type_has_destructor())
494 // Type has been marked explicitly as bitwise movable, which means we can move them around but they may need to be destructed.
495 // First destroy the erased objects.
496 scalar_type<T>::destruct_array(pDst, n);
499 // Copy "down" the objects to preserve, filling in the empty slots.
500 memmove(pDst, pSrc, num_to_move * sizeof(T));
504 // Type is not bitwise copyable or movable.
505 // Move them down one at a time by using the equals operator, and destroying anything that's left over at the end.
506 T *pDst_end = pDst + num_to_move;
507 while (pDst != pDst_end)
510 scalar_type<T>::destruct_array(pDst_end, n);
// Single-element erase by index.
516 inline void erase(uint index)
// Erase by pointer into the container (converted to an index).
521 inline void erase(T *p)
523 VOGL_ASSERT((p >= m_p) && (p < (m_p + m_size)));
524 erase(static_cast<uint>(p - m_p));
// O(1) erase that does not preserve element order: overwrite the victim with
// the last element, then drop the tail.
527 void erase_unordered(uint index)
529 VOGL_ASSERT(index < m_size);
531 if ((index + 1) < m_size)
532 (*this)[index] = back();
// Element-wise equality; rel_ops<> derives operator!= from this.
537 inline bool operator==(const vector &rhs) const
542 if (m_size != rhs.m_size)
// Trivial types (scalar_type<T>::cFlag) compare with one memcmp ...
546 if (scalar_type<T>::cFlag)
547 return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
// ... everything else compares element by element via operator==.
551 const T *pDst = rhs.m_p;
552 for (uint i = m_size; i; i--)
553 if (!(*pSrc++ == *pDst++))
561 // lexicographical order
562 inline bool operator<(const vector &rhs) const
564 const uint min_size = math::minimum(m_size, rhs.m_size);
567 const T *pSrc_end = m_p + min_size;
568 const T *pDst = rhs.m_p;
// Skip the common equal prefix, then compare the first mismatch.
570 while ((pSrc < pSrc_end) && (*pSrc == *pDst))
577 return *pSrc < *pDst;
// All compared elements equal: the shorter vector orders first.
579 return m_size < rhs.m_size;
// O(1) swap: just exchanges the pointer/size/capacity fields.
582 inline void swap(vector &other)
584 utils::swap(m_p, other.m_p);
585 utils::swap(m_size, other.m_size);
586 utils::swap(m_capacity, other.m_capacity);
// sort() body: introsort over the whole range using operator<.
591 introsort(begin(), end());
594 template <typename Comp>
// Sort with a caller-supplied comparator.
595 inline void sort(Comp compare_obj)
597 introsort(begin(), end(), compare_obj);
// True when the elements are in non-decreasing order.
600 inline bool is_sorted() const
602 for (uint i = 1; i < m_size; ++i)
603 if (m_p[i] < m_p[i - 1])
// unique() body: std::unique compacts adjacent duplicates, resize() trims the
// leftover tail (input is expected to be sorted for full dedup).
614 resize(static_cast<uint>(std::unique(begin(), end()) - begin()));
618 template <typename EqComp>
// unique() with a caller-supplied equality predicate.
619 inline void unique(EqComp comp)
625 resize(static_cast<uint>(std::unique(begin(), end(), comp) - begin()));
// In-place reversal by swapping symmetric pairs from both ends.
629 inline void reverse()
631 uint j = m_size >> 1;
632 for (uint i = 0; i < j; i++)
633 utils::swap(m_p[i], m_p[m_size - 1 - i]);
// Linear search; returns the first matching index or cInvalidIndex.
// The assert guards the uint -> int index narrowing below.
636 inline int find(const T &key) const
638 VOGL_ASSERT(m_size <= static_cast<uint>(cINT32_MAX));
641 const T *p_end = m_p + m_size;
654 return cInvalidIndex;
657 template <typename Q>
// Linear search with a caller-supplied equality predicate.
658 inline int find(const T &key, Q equal_to) const
660 VOGL_ASSERT(m_size <= static_cast<uint>(cINT32_MAX));
663 const T *p_end = m_p + m_size;
669 if (equal_to(key, *p))
676 return cInvalidIndex;
// Reverse linear search; returns the last matching index or cInvalidIndex.
679 inline int find_last(const T &key) const
681 VOGL_ASSERT(m_size <= static_cast<uint>(cINT32_MAX));
683 for (int i = m_size - 1; i >= 0; --i)
687 return cInvalidIndex;
690 template <typename Q>
// find_last with a caller-supplied equality predicate.
691 inline int find_last(const T &key, Q equal_to) const
693 VOGL_ASSERT(m_size <= static_cast<uint>(cINT32_MAX));
695 for (int i = m_size - 1; i >= 0; --i)
696 if (equal_to(m_p[i], key))
699 return cInvalidIndex;
// Binary search over a sorted vector; returns a matching index or cInvalidIndex.
702 inline int find_sorted(const T &key) const
704 VOGL_ASSERT(m_size <= static_cast<uint>(cINT32_MAX));
708 // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
709 // TODO: OK, is this worth it vs the usual algorithm?
// Start at the middle; step sizes halve each iteration (the (m+1)>>1 terms).
710 int i = ((m_size + 1) >> 1) - 1;
715 VOGL_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
716 const T *pKey_i = m_p + i;
// cmp is 0 or 1; used branchlessly below to pick the step direction.
717 int cmp = key < *pKey_i;
718 #ifdef VOGL_BUILD_DEBUG
// Debug-only: verify operator< gives a consistent strict weak ordering for key/*pKey_i.
719 int cmp2 = *pKey_i < key;
720 VOGL_ASSERT((cmp != cmp2) || (key == *pKey_i));
722 if ((!cmp) && (key == *pKey_i))
// Branchless step: (x ^ cmp) - cmp negates x when cmp == 1.
728 i += (((m + 1) >> 1) ^ cmp) - cmp;
// Second (unrolled) probe of the same loop.
732 VOGL_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
735 #ifdef VOGL_BUILD_DEBUG
736 cmp2 = *pKey_i < key;
737 VOGL_ASSERT((cmp != cmp2) || (key == *pKey_i));
739 if ((!cmp) && (key == *pKey_i))
745 i += (((m + 1) >> 1) ^ cmp) - cmp;
751 return cInvalidIndex;
754 template <typename Q>
// Same uniform binary search, but ordering comes from the less_than functor;
// equality is inferred as !less_than(a,b) && !less_than(b,a).
755 inline int find_sorted(const T &key, Q less_than) const
759 // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
760 // TODO: OK, is this worth it vs the usual algorithm?
761 int i = ((m_size + 1) >> 1) - 1;
766 VOGL_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
767 const T *pKey_i = m_p + i;
768 int cmp = less_than(key, *pKey_i);
769 #ifdef VOGL_BUILD_DEBUG
770 int cmp2 = less_than(*pKey_i, key);
771 VOGL_ASSERT((cmp != cmp2) || (key == *pKey_i));
773 if ((!cmp) && (!less_than(*pKey_i, key)))
779 i += (((m + 1) >> 1) ^ cmp) - cmp;
783 VOGL_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
785 cmp = less_than(key, *pKey_i);
786 #ifdef VOGL_BUILD_DEBUG
787 cmp2 = less_than(*pKey_i, key);
788 VOGL_ASSERT((cmp != cmp2) || (key == *pKey_i));
790 if ((!cmp) && (!less_than(*pKey_i, key)))
796 i += (((m + 1) >> 1) ^ cmp) - cmp;
802 return cInvalidIndex;
// Counts elements equal to key (linear scan).
805 inline uint count_occurences(const T &key) const
810 const T *p_end = m_p + m_size;
// Fills every element with o. Single-byte trivial types take the memset
// fast path; everything else is assigned element by element.
823 inline void set_all(const T &o)
825 if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
826 memset(m_p, *reinterpret_cast<const uint8 *>(&o), m_size);
830 T *pDst_end = pDst + m_size;
831 while (pDst != pDst_end)
836 // Caller assumes ownership of the heap block associated with the container. Container is cleared.
837 inline void *assume_ownership()
846 // Caller is granting ownership of the indicated heap block.
847 // Block must have size constructed elements, and have enough room for capacity elements.
848 inline bool grant_ownership(T *p, uint size, uint capacity)
850 // To try to prevent the caller from obviously shooting themselves in the foot.
// Reject blocks that overlap this container's own allocation.
851 if (((p + capacity) > m_p) && (p < (m_p + m_capacity)))
853 // Can't grant ownership of a block inside the container itself!
881 m_capacity = capacity;
885 // Fisher–Yates shuffle
886 template <typename R>
// In-place shuffle using rng rm; a no-op for 0/1 elements or absurd sizes.
887 inline void shuffle(R &rm)
889 VOGL_ASSERT(m_size < static_cast<uint>(cINT32_MAX));
891 if ((m_size <= 1U) || (m_size >= static_cast<uint>(cINT32_MAX)))
// Classic inside-out loop: swap element i with a uniform pick from [0, i].
894 for (uint i = m_size - 1; i >= 1; --i)
896 uint j = rm.irand_inclusive(0, i);
897 std::swap(m_p[j], m_p[i]);
// Compile-time detector: is_vector<Q>::cFlag — true only for vector<Q> (specialized below).
906 template <typename Q>
914 template <typename Q>
915 struct is_vector<vector<Q> >
// Relocation callback handed to elemental_vector::increase_capacity for types
// that can't be moved with memcpy: copy-construct each element into the new
// block via placement new (old elements are presumably destructed after — the
// destruct lines fall outside this excerpt).
923 static void object_mover(void *pDst_void, void *pSrc_void, uint num)
925 T *pSrc = static_cast<T *>(pSrc_void);
926 T *const pSrc_end = pSrc + num;
927 T *pDst = static_cast<T *>(pDst_void);
929 while (pSrc != pSrc_end)
932 new (static_cast<void *>(pDst)) T(*pSrc);
// Typed wrapper over the non-template elemental_vector grow routine.
// Bitwise copyable/movable types (and nested vectors, whose headers may be
// memcpy'd) pass NULL so the core can relocate with memcpy.
939 inline bool increase_capacity(uint min_new_capacity, bool grow_hint, bool nofail = false)
941 return reinterpret_cast<elemental_vector *>(this)->increase_capacity(
942 min_new_capacity, grow_hint, sizeof(T),
943 (type_is_bitwise_copyable_or_movable() || (is_vector<T>::cFlag)) ? NULL : object_mover, nofail);
// Trait queries used to pick memcpy/memmove fast paths throughout the class.
946 static inline bool type_is_bitwise_copyable()
948 return VOGL_IS_BITWISE_COPYABLE(T);
951 static inline bool type_is_bitwise_copyable_or_movable()
953 return VOGL_IS_BITWISE_COPYABLE_OR_MOVABLE(T);
956 static inline bool type_has_destructor()
958 return VOGL_HAS_DESTRUCTOR(T);
// Convenience typedefs for common element types.
962 typedef vogl::vector<uint8> uint8_vec;
963 typedef vogl::vector<char> char_vec;
964 typedef vogl::vector<int> int_vec;
965 typedef vogl::vector<uint> uint_vec;
966 typedef vogl::vector<int32> int32_vec;
967 typedef vogl::vector<uint32> uint32_vec;
968 typedef vogl::vector<int64_t> int64_vec;
969 typedef vogl::vector<uint64_t> uint64_vec;
970 typedef vogl::vector<float> float_vec;
971 typedef vogl::vector<double> double_vec;
972 typedef vogl::vector<void *> void_ptr_vec;
973 typedef vogl::vector<const void *> const_void_ptr_vec;
975 template <typename T>
// Mark vector<T> itself as bitwise movable: its header (ptr/size/capacity)
// can be relocated with memcpy regardless of T.
976 struct bitwise_movable<vector<T> >
984 template <typename T>
// Free-function swap forwarding to the O(1) member swap.
985 inline void swap(vector<T> &a, vector<T> &b)
// Self-test: builds random sorted vectors and exercises find_sorted() on
// hits (even values) and misses (odd values, out-of-range values).
991 inline void vector_test()
994 for (uint i = 0; i < 100000; i++)
996 uint n = r.irand(0, 1000);
997 vogl::vector<int> xx(n);
// NOTE(review): signed 'int i' compared against unsigned 'n' (implicit
// conversion), and this 'i' shadows the outer loop's 'i' — confirm intended.
999 for (int i = 0; i < n; i++)
1001 VOGL_VERIFY(xx.find_sorted(-1) < 0);
1002 for (uint i = 0; i < n; i++)
// Elements are the even numbers 0,2,4,... so 2*i must be found at index i
// and every odd value must miss.
1004 VOGL_VERIFY(xx.find_sorted(2 * i) == i);
1005 VOGL_VERIFY(xx.find_sorted(2 * i + 1) < 0);
1007 VOGL_VERIFY(xx.find_sorted(n * 2) < 0);
1008 VOGL_VERIFY(xx.find_sorted(99999) < 0);
1017 template <typename T>
// std-namespace swap overload so generic code picks up the O(1) swap via ADL-bypassing std::swap calls.
1018 inline void swap(vogl::vector<T> &a, vogl::vector<T> &b)