extlzham 0.0.1.PROTOTYPE3-x86-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. checksums.yaml +7 -0
  2. data/LICENSE.md +27 -0
  3. data/README.md +74 -0
  4. data/Rakefile +152 -0
  5. data/contrib/lzham/LICENSE +22 -0
  6. data/contrib/lzham/README.md +209 -0
  7. data/contrib/lzham/include/lzham.h +781 -0
  8. data/contrib/lzham/lzhamcomp/lzham_comp.h +38 -0
  9. data/contrib/lzham/lzhamcomp/lzham_lzbase.cpp +244 -0
  10. data/contrib/lzham/lzhamcomp/lzham_lzbase.h +45 -0
  11. data/contrib/lzham/lzhamcomp/lzham_lzcomp.cpp +608 -0
  12. data/contrib/lzham/lzhamcomp/lzham_lzcomp_internal.cpp +1966 -0
  13. data/contrib/lzham/lzhamcomp/lzham_lzcomp_internal.h +472 -0
  14. data/contrib/lzham/lzhamcomp/lzham_lzcomp_state.cpp +1413 -0
  15. data/contrib/lzham/lzhamcomp/lzham_match_accel.cpp +562 -0
  16. data/contrib/lzham/lzhamcomp/lzham_match_accel.h +146 -0
  17. data/contrib/lzham/lzhamcomp/lzham_null_threading.h +97 -0
  18. data/contrib/lzham/lzhamcomp/lzham_pthreads_threading.cpp +229 -0
  19. data/contrib/lzham/lzhamcomp/lzham_pthreads_threading.h +520 -0
  20. data/contrib/lzham/lzhamcomp/lzham_threading.h +12 -0
  21. data/contrib/lzham/lzhamcomp/lzham_win32_threading.cpp +220 -0
  22. data/contrib/lzham/lzhamcomp/lzham_win32_threading.h +368 -0
  23. data/contrib/lzham/lzhamdecomp/lzham_assert.cpp +66 -0
  24. data/contrib/lzham/lzhamdecomp/lzham_assert.h +40 -0
  25. data/contrib/lzham/lzhamdecomp/lzham_checksum.cpp +73 -0
  26. data/contrib/lzham/lzhamdecomp/lzham_checksum.h +13 -0
  27. data/contrib/lzham/lzhamdecomp/lzham_config.h +23 -0
  28. data/contrib/lzham/lzhamdecomp/lzham_core.h +264 -0
  29. data/contrib/lzham/lzhamdecomp/lzham_decomp.h +37 -0
  30. data/contrib/lzham/lzhamdecomp/lzham_helpers.h +54 -0
  31. data/contrib/lzham/lzhamdecomp/lzham_huffman_codes.cpp +262 -0
  32. data/contrib/lzham/lzhamdecomp/lzham_huffman_codes.h +14 -0
  33. data/contrib/lzham/lzhamdecomp/lzham_lzdecomp.cpp +1527 -0
  34. data/contrib/lzham/lzhamdecomp/lzham_lzdecompbase.cpp +131 -0
  35. data/contrib/lzham/lzhamdecomp/lzham_lzdecompbase.h +89 -0
  36. data/contrib/lzham/lzhamdecomp/lzham_math.h +142 -0
  37. data/contrib/lzham/lzhamdecomp/lzham_mem.cpp +284 -0
  38. data/contrib/lzham/lzhamdecomp/lzham_mem.h +112 -0
  39. data/contrib/lzham/lzhamdecomp/lzham_platform.cpp +157 -0
  40. data/contrib/lzham/lzhamdecomp/lzham_platform.h +284 -0
  41. data/contrib/lzham/lzhamdecomp/lzham_prefix_coding.cpp +351 -0
  42. data/contrib/lzham/lzhamdecomp/lzham_prefix_coding.h +146 -0
  43. data/contrib/lzham/lzhamdecomp/lzham_symbol_codec.cpp +1484 -0
  44. data/contrib/lzham/lzhamdecomp/lzham_symbol_codec.h +556 -0
  45. data/contrib/lzham/lzhamdecomp/lzham_timer.cpp +147 -0
  46. data/contrib/lzham/lzhamdecomp/lzham_timer.h +99 -0
  47. data/contrib/lzham/lzhamdecomp/lzham_traits.h +141 -0
  48. data/contrib/lzham/lzhamdecomp/lzham_types.h +97 -0
  49. data/contrib/lzham/lzhamdecomp/lzham_utils.h +58 -0
  50. data/contrib/lzham/lzhamdecomp/lzham_vector.cpp +75 -0
  51. data/contrib/lzham/lzhamdecomp/lzham_vector.h +588 -0
  52. data/contrib/lzham/lzhamlib/lzham_lib.cpp +179 -0
  53. data/examples/basic.rb +48 -0
  54. data/ext/constants.c +64 -0
  55. data/ext/decoder.c +313 -0
  56. data/ext/depend +5 -0
  57. data/ext/encoder.c +372 -0
  58. data/ext/error.c +80 -0
  59. data/ext/extconf.rb +29 -0
  60. data/ext/extlzham.c +34 -0
  61. data/ext/extlzham.h +62 -0
  62. data/gemstub.rb +22 -0
  63. data/lib/2.0/extlzham.so +0 -0
  64. data/lib/2.1/extlzham.so +0 -0
  65. data/lib/2.2/extlzham.so +0 -0
  66. data/lib/extlzham.rb +158 -0
  67. data/lib/extlzham/version.rb +5 -0
  68. data/test/test_extlzham.rb +35 -0
  69. metadata +156 -0
@@ -0,0 +1,58 @@
1
+ // File: lzham_utils.h
2
+ // See Copyright Notice and license at the end of include/lzham.h
3
+ #pragma once
4
+
5
// Alignment (in bytes) of type/value v. The "!sizeof(v)" arm can never be
// taken (sizeof is never 0); it is a degenerate guard. Falls back to
// sizeof(uint32) if __alignof reports 0.
#define LZHAM_GET_ALIGNMENT(v) ((!sizeof(v)) ? 1 : (__alignof(v) ? __alignof(v) : sizeof(uint32)))

// Classic min/max macros -- each argument may be evaluated twice, so avoid
// passing expressions with side effects.
#define LZHAM_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define LZHAM_MAX(a, b) (((a) < (b)) ? (b) : (a))

// Declared (never defined) helper used only inside an unevaluated sizeof:
// it binds only to genuine arrays, so LZHAM_ARRAY_SIZE fails to compile when
// X is a pointer rather than an array.
template<class T, size_t N> T decay_array_to_subtype(T (&a)[N]);
#define LZHAM_ARRAY_SIZE(X) (sizeof(X) / sizeof(decay_array_to_subtype(X)))
12
+
13
+ namespace lzham
14
+ {
15
+ namespace utils
16
+ {
17
// Exchange the values of l and r using a single temporary copy
// (minimal std::swap equivalent).
template<typename T> inline void swap(T& l, T& r)
{
   T old_l(l);
   l = r;
   r = old_l;
}
23
+
24
// Byte-wise clear obj to all zeros.
// NOTE(review): only meaningful for trivially-copyable element types --
// callers appear to use it on POD state structs; confirm for new uses.
template<typename T> inline void zero_object(T& obj)
{
   void* const p = &obj;
   memset(p, 0, sizeof(T));
}
28
+
29
+ static inline uint32 swap32(uint32 x) { return ((x << 24U) | ((x << 8U) & 0x00FF0000U) | ((x >> 8U) & 0x0000FF00U) | (x >> 24U)); }
30
+
31
+ inline uint count_leading_zeros16(uint v)
32
+ {
33
+ LZHAM_ASSERT(v < 0x10000);
34
+
35
+ uint temp;
36
+ uint n = 16;
37
+
38
+ temp = v >> 8;
39
+ if (temp) { n -= 8; v = temp; }
40
+
41
+ temp = v >> 4;
42
+ if (temp) { n -= 4; v = temp; }
43
+
44
+ temp = v >> 2;
45
+ if (temp) { n -= 2; v = temp; }
46
+
47
+ temp = v >> 1;
48
+ if (temp) { n -= 1; v = temp; }
49
+
50
+ if (v & 1) n--;
51
+
52
+ return n;
53
+ }
54
+
55
+ } // namespace utils
56
+
57
+ } // namespace lzham
58
+
@@ -0,0 +1,75 @@
1
+ // File: lzham_vector.cpp
2
+ // See Copyright Notice and license at the end of include/lzham.h
3
+ #include "lzham_core.h"
4
+ #include "lzham_vector.h"
5
+
6
+ namespace lzham
7
+ {
8
+ bool elemental_vector::increase_capacity(uint min_new_capacity, bool grow_hint, uint element_size, object_mover pMover, bool nofail)
9
+ {
10
+ LZHAM_ASSERT(m_size <= m_capacity);
11
+
12
+ #if LZHAM_64BIT_POINTERS
13
+ LZHAM_ASSUME(sizeof(void*) == sizeof(uint64));
14
+ LZHAM_ASSERT(min_new_capacity < (0x400000000ULL / element_size));
15
+ #else
16
+ LZHAM_ASSUME(sizeof(void*) == sizeof(uint32));
17
+ LZHAM_ASSERT(min_new_capacity < (0x7FFF0000U / element_size));
18
+ #endif
19
+
20
+ if (m_capacity >= min_new_capacity)
21
+ return true;
22
+
23
+ // new_capacity must be 64-bit when compiling on x64.
24
+ size_t new_capacity = (size_t)min_new_capacity;
25
+ if ((grow_hint) && (!math::is_power_of_2(static_cast<uint64>(new_capacity))))
26
+ new_capacity = static_cast<uint>(math::next_pow2(static_cast<uint64>(new_capacity)));
27
+
28
+ LZHAM_ASSERT(new_capacity && (new_capacity > m_capacity));
29
+
30
+ const size_t desired_size = element_size * new_capacity;
31
+ size_t actual_size;
32
+ if (!pMover)
33
+ {
34
+ void* new_p = lzham_realloc(m_p, desired_size, &actual_size, true);
35
+ if (!new_p)
36
+ {
37
+ if (nofail)
38
+ return false;
39
+
40
+ char buf[256];
41
+ sprintf_s(buf, sizeof(buf), "vector: lzham_realloc() failed allocating %u bytes", desired_size);
42
+ LZHAM_FAIL(buf);
43
+ }
44
+ m_p = new_p;
45
+ }
46
+ else
47
+ {
48
+ void* new_p = lzham_malloc(desired_size, &actual_size);
49
+ if (!new_p)
50
+ {
51
+ if (nofail)
52
+ return false;
53
+
54
+ char buf[256];
55
+ sprintf_s(buf, sizeof(buf), "vector: lzham_malloc() failed allocating %u bytes", desired_size);
56
+ LZHAM_FAIL(buf);
57
+ }
58
+
59
+ (*pMover)(new_p, m_p, m_size);
60
+
61
+ if (m_p)
62
+ lzham_free(m_p);
63
+
64
+ m_p = new_p;
65
+ }
66
+
67
+ if (actual_size > desired_size)
68
+ m_capacity = static_cast<uint>(actual_size / element_size);
69
+ else
70
+ m_capacity = static_cast<uint>(new_capacity);
71
+
72
+ return true;
73
+ }
74
+
75
+ } // namespace lzham
@@ -0,0 +1,588 @@
1
+ // File: lzham_vector.h
2
+ // See Copyright Notice and license at the end of include/lzham.h
3
+ #pragma once
4
+
5
+ namespace lzham
6
+ {
7
// Type-erased storage core shared by every vector<T> instantiation.
// vector<T> has an identical member layout and reinterpret_casts itself to
// this type, so the non-template growth logic (lzham_vector.cpp) is emitted
// only once. Do NOT reorder these members.
struct elemental_vector
{
   void* m_p;        // heap block (NULL when nothing allocated)
   uint m_size;      // live element count
   uint m_capacity;  // allocated element count

   // Callback that relocates num elements from pSrc to pDst (construct at the
   // destination, destruct the source) for non-bitwise-movable element types.
   typedef void (*object_mover)(void* pDst, void* pSrc, uint num);

   // Grow storage to hold at least min_new_capacity elements of element_size
   // bytes. grow_hint rounds capacity up to a power of 2; nofail makes an
   // allocation failure return false instead of LZHAM_FAIL.
   bool increase_capacity(uint min_new_capacity, bool grow_hint, uint element_size, object_mover pRelocate, bool nofail);
};
17
+
18
// Dynamic array in the spirit of std::vector, backed by the lzham heap
// (lzham_malloc/lzham_realloc/lzham_free) with 32-bit sizes.
//
// Layout note: (m_p, m_size, m_capacity) intentionally matches
// elemental_vector; increase_capacity() reinterpret_casts *this to that type
// so the non-template growth code in lzham_vector.cpp is shared by all
// instantiations.
//
// Unlike std::vector, growth can be non-fatal: the try_* methods return false
// on allocation failure instead of throwing.
template<typename T>
class vector : public helpers::rel_ops< vector<T> >
{
public:
   typedef T* iterator;
   typedef const T* const_iterator;
   typedef T value_type;
   typedef T& reference;
   typedef const T& const_reference;
   typedef T* pointer;
   typedef const T* const_pointer;

   // Empty vector; performs no allocation.
   inline vector() :
      m_p(NULL),
      m_size(0),
      m_capacity(0)
   {
   }

   // n copies of init. NOTE(review): increase_capacity()'s result is ignored;
   // on out-of-memory LZHAM_FAIL fires inside it (nofail defaults to false).
   inline vector(uint n, const T& init) :
      m_p(NULL),
      m_size(0),
      m_capacity(0)
   {
      increase_capacity(n, false);
      helpers::construct_array(m_p, n, init);
      m_size = n;
   }

   // Deep copy; memcpy fast path for bitwise-copyable element types.
   inline vector(const vector& other) :
      m_p(NULL),
      m_size(0),
      m_capacity(0)
   {
      increase_capacity(other.m_size, false);

      m_size = other.m_size;

      if (LZHAM_IS_BITWISE_COPYABLE(T))
         memcpy(m_p, other.m_p, m_size * sizeof(T));
      else
      {
         T* pDst = m_p;
         const T* pSrc = other.m_p;
         for (uint i = m_size; i > 0; i--)
            helpers::construct(pDst++, *pSrc++);
      }
   }

   // size default-constructed elements.
   inline explicit vector(uint size) :
      m_p(NULL),
      m_size(0),
      m_capacity(0)
   {
      try_resize(size);
   }

   inline ~vector()
   {
      if (m_p)
      {
         scalar_type<T>::destruct_array(m_p, m_size);
         lzham_free(m_p);
      }
   }

   // Copy assignment. Reuses the existing buffer when large enough, otherwise
   // frees and reallocates. On allocation failure LZHAM_FAIL is called and
   // *this is left empty.
   inline vector& operator= (const vector& other)
   {
      if (this == &other)
         return *this;

      if (m_capacity >= other.m_size)
         try_resize(0);    // keep the buffer, destruct the elements
      else
      {
         clear();
         if (!increase_capacity(other.m_size, false))
         {
            LZHAM_FAIL("lzham::vector operator=: Out of memory!");
            return *this;
         }
      }

      if (LZHAM_IS_BITWISE_COPYABLE(T))
         memcpy(m_p, other.m_p, other.m_size * sizeof(T));
      else
      {
         T* pDst = m_p;
         const T* pSrc = other.m_p;
         for (uint i = other.m_size; i > 0; i--)
            helpers::construct(pDst++, *pSrc++);
      }

      m_size = other.m_size;

      return *this;
   }

   inline const T* begin() const { return m_p; }
   T* begin() { return m_p; }

   inline const T* end() const { return m_p + m_size; }
   T* end() { return m_p + m_size; }

   inline bool empty() const { return !m_size; }
   inline uint size() const { return m_size; }
   inline uint size_in_bytes() const { return m_size * sizeof(T); }
   inline uint capacity() const { return m_capacity; }

   // operator[] will assert on out of range indices, but in final builds there
   // is (and will never be) any range checking on this method.
   inline const T& operator[] (uint i) const { LZHAM_ASSERT(i < m_size); return m_p[i]; }
   inline T& operator[] (uint i) { LZHAM_ASSERT(i < m_size); return m_p[i]; }

   // at() always includes range checking, even in final builds, unlike operator [].
   // The first element is returned if the index is out of range.
   // NOTE(review): on an EMPTY vector an out-of-range at() still dereferences
   // m_p[0] (m_p may be NULL) -- do not rely on at() when size() == 0.
   inline const T& at(uint i) const { LZHAM_ASSERT(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }
   inline T& at(uint i) { LZHAM_ASSERT(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }

   inline const T& front() const { LZHAM_ASSERT(m_size); return m_p[0]; }
   inline T& front() { LZHAM_ASSERT(m_size); return m_p[0]; }

   inline const T& back() const { LZHAM_ASSERT(m_size); return m_p[m_size - 1]; }
   inline T& back() { LZHAM_ASSERT(m_size); return m_p[m_size - 1]; }

   inline const T* get_ptr() const { return m_p; }
   inline T* get_ptr() { return m_p; }

   // Destructs all elements and releases the buffer (capacity drops to 0).
   inline void clear()
   {
      if (m_p)
      {
         scalar_type<T>::destruct_array(m_p, m_size);
         lzham_free(m_p);
         m_p = NULL;
         m_size = 0;
         m_capacity = 0;
      }
   }

   // Releases the buffer WITHOUT running element destructors -- only safe when
   // elements own no resources.
   inline void clear_no_destruction()
   {
      if (m_p)
      {
         lzham_free(m_p);
         m_p = NULL;
         m_size = 0;
         m_capacity = 0;
      }
   }

   // Ensure capacity >= new_capacity; false on allocation failure.
   inline bool try_reserve(uint new_capacity)
   {
      return increase_capacity(new_capacity, true, true);
   }

   // Resize to new_size: shrinking destructs the tail, growing
   // default-constructs new elements. False (vector unchanged) on allocation
   // failure.
   inline bool try_resize(uint new_size, bool grow_hint = false)
   {
      if (m_size != new_size)
      {
         if (new_size < m_size)
            scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
         else
         {
            if (new_size > m_capacity)
            {
               // Growing by exactly one implies push_back-style usage, so round
               // capacity up to a power of 2 for amortized growth.
               if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
                  return false;
            }

            scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
         }

         m_size = new_size;
      }

      return true;
   }

   // Like try_resize() but leaves any new elements unconstructed.
   inline bool try_resize_no_construct(uint new_size, bool grow_hint = false)
   {
      if (new_size > m_capacity)
      {
         if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
            return false;
      }

      m_size = new_size;

      return true;
   }

   // Append i default-constructed elements; returns a pointer to the first new
   // element, or NULL on allocation failure.
   inline T* try_enlarge(uint i)
   {
      uint cur_size = m_size;
      if (!try_resize(cur_size + i, true))
         return NULL;
      return get_ptr() + cur_size;
   }

   // Append a copy of obj. obj must not alias an element of this vector
   // (asserted), because growth may reallocate. False on allocation failure.
   inline bool try_push_back(const T& obj)
   {
      LZHAM_ASSERT(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

      if (m_size >= m_capacity)
      {
         if (!increase_capacity(m_size + 1, true, true))
            return false;
      }

      scalar_type<T>::construct(m_p + m_size, obj);
      m_size++;

      return true;
   }

   // Destroys the last element; no-op (after the assert) when empty.
   inline void pop_back()
   {
      LZHAM_ASSERT(m_size);

      if (m_size)
      {
         m_size--;
         scalar_type<T>::destruct(&m_p[m_size]);
      }
   }

   // Insert n elements copied from p at index. False on allocation failure.
   // NOTE(review): unlike try_push_back() there is no alias assert here -- p
   // must not point into this vector, since try_resize() may reallocate.
   inline bool insert(uint index, const T* p, uint n)
   {
      LZHAM_ASSERT(index <= m_size);
      if (!n)
         return true;

      const uint orig_size = m_size;
      if (!try_resize(m_size + n, true))
         return false;

      const uint num_to_move = orig_size - index;
      if (num_to_move)
      {
         if (LZHAM_IS_BITWISE_COPYABLE(T))
            memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
         else
         {
            // Copy backwards so the overlapping source/destination ranges are
            // handled safely.
            const T* pSrc = m_p + orig_size - 1;
            T* pDst = const_cast<T*>(pSrc) + n;

            for (uint i = 0; i < num_to_move; i++)
            {
               LZHAM_ASSERT((pDst - m_p) < (int)m_size);
               *pDst-- = *pSrc--;
            }
         }
      }

      T* pDst = m_p + index;

      if (LZHAM_IS_BITWISE_COPYABLE(T))
         memcpy(pDst, p, sizeof(T) * n);
      else
      {
         for (uint i = 0; i < n; i++)
         {
            LZHAM_ASSERT((pDst - m_p) < (int)m_size);
            *pDst++ = *p++;
         }
      }

      return true;
   }

   // push_front() isn't going to be very fast - it's only here for usability.
   inline bool try_push_front(const T& obj)
   {
      return insert(0, &obj, 1);
   }

   // Append all elements of other; false on allocation failure.
   bool append(const vector& other)
   {
      if (other.m_size)
         return insert(m_size, &other[0], other.m_size);
      return true;
   }

   // Append n elements from p; false on allocation failure.
   bool append(const T* p, uint n)
   {
      if (n)
         return insert(m_size, p, n);
      return true;
   }

   // Remove n elements starting at start, shifting the tail down.
   // Out-of-range requests assert and are ignored in release builds.
   inline void erase(uint start, uint n)
   {
      LZHAM_ASSERT((start + n) <= m_size);
      if ((start + n) > m_size)
         return;

      if (!n)
         return;

      const uint num_to_move = m_size - (start + n);

      T* pDst = m_p + start;

      const T* pSrc = m_p + start + n;

      if (LZHAM_IS_BITWISE_COPYABLE(T))
         memmove(pDst, pSrc, num_to_move * sizeof(T));
      else
      {
         T* pDst_end = pDst + num_to_move;

         while (pDst != pDst_end)
            *pDst++ = *pSrc++;

         // The moved-from tail slots still hold constructed objects.
         scalar_type<T>::destruct_array(pDst_end, n);
      }

      m_size -= n;
   }

   inline void erase(uint index)
   {
      erase(index, 1);
   }

   // Erase by iterator/pointer; p must point into this vector (asserted).
   inline void erase(T* p)
   {
      LZHAM_ASSERT((p >= m_p) && (p < (m_p + m_size)));
      erase(static_cast<uint>(p - m_p));
   }

   // O(1) erase that does NOT preserve element order: the last element is
   // moved into the vacated slot.
   void erase_unordered(uint index)
   {
      LZHAM_ASSERT(index < m_size);

      if ((index + 1) < m_size)
         (*this)[index] = back();

      pop_back();
   }

   // Element-wise equality; memcmp fast path when scalar_type<T>::cFlag is set
   // (presumably "trivially comparable" -- confirm against lzham_traits.h).
   inline bool operator== (const vector& rhs) const
   {
      if (m_size != rhs.m_size)
         return false;
      else if (m_size)
      {
         if (scalar_type<T>::cFlag)
            return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
         else
         {
            const T* pSrc = m_p;
            const T* pDst = rhs.m_p;
            for (uint i = m_size; i; i--)
               if (!(*pSrc++ == *pDst++))
                  return false;
         }
      }

      return true;
   }

   // Lexicographic compare; helpers::rel_ops derives the other relational
   // operators from this and operator==.
   inline bool operator< (const vector& rhs) const
   {
      const uint min_size = math::minimum(m_size, rhs.m_size);

      const T* pSrc = m_p;
      const T* pSrc_end = m_p + min_size;
      const T* pDst = rhs.m_p;

      while ((pSrc < pSrc_end) && (*pSrc == *pDst))
      {
         pSrc++;
         pDst++;
      }

      if (pSrc < pSrc_end)
         return *pSrc < *pDst;

      return m_size < rhs.m_size;
   }

   // O(1) swap -- just exchanges the three members.
   inline void swap(vector& other)
   {
      utils::swap(m_p, other.m_p);
      utils::swap(m_size, other.m_size);
      utils::swap(m_capacity, other.m_capacity);
   }

   inline void sort()
   {
      std::sort(begin(), end());
   }

   // Sorts, then removes adjacent duplicates.
   // NOTE(review): calls resize(), which is not defined in this class (only
   // try_resize() is visible here) -- this member will fail to compile if
   // instantiated. Confirm against the full file/project.
   inline void unique()
   {
      if (!empty())
      {
         sort();

         resize(std::unique(begin(), end()) - begin());
      }
   }

   // In-place reversal by swapping ends toward the middle.
   inline void reverse()
   {
      uint j = m_size >> 1;
      for (uint i = 0; i < j; i++)
         utils::swap(m_p[i], m_p[m_size - 1 - i]);
   }

   // Linear search; first matching index, or cInvalidIndex if absent.
   inline int find(const T& key) const
   {
      const T* p = m_p;
      const T* p_end = m_p + m_size;

      uint index = 0;

      while (p != p_end)
      {
         if (key == *p)
            return index;

         p++;
         index++;
      }

      return cInvalidIndex;
   }

   // Binary search over an ascending-sorted vector; a matching index, or
   // cInvalidIndex if absent.
   inline int find_sorted(const T& key) const
   {
      if (m_size)
      {
         // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
         int i = ((m_size + 1) >> 1) - 1;
         int m = m_size;

         for ( ; ; )
         {
            LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
            const T* pKey_i = m_p + i;
            int cmp = key < *pKey_i;
            if ((!cmp) && (key == *pKey_i)) return i;
            m >>= 1;
            if (!m) break;
            cmp = -cmp;
            // Step half the remaining interval left or right depending on cmp.
            i += (((m + 1) >> 1) ^ cmp) - cmp;

            LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
            pKey_i = m_p + i;
            cmp = key < *pKey_i;
            if ((!cmp) && (key == *pKey_i)) return i;
            m >>= 1;
            if (!m) break;
            cmp = -cmp;
            i += (((m + 1) >> 1) ^ cmp) - cmp;
         }
      }

      return cInvalidIndex;
   }

   // Same as find_sorted() above, but the ordering comes from the caller's
   // less_than predicate; equality is derived as !(a<b) && !(b<a).
   template<typename Q>
   inline int find_sorted(const T& key, Q less_than) const
   {
      if (m_size)
      {
         // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
         int i = ((m_size + 1) >> 1) - 1;
         int m = m_size;

         for ( ; ; )
         {
            LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
            const T* pKey_i = m_p + i;
            int cmp = less_than(key, *pKey_i);
            if ((!cmp) && (!less_than(*pKey_i, key))) return i;
            m >>= 1;
            if (!m) break;
            cmp = -cmp;
            i += (((m + 1) >> 1) ^ cmp) - cmp;

            LZHAM_ASSERT_OPEN_RANGE(i, 0, (int)m_size);
            pKey_i = m_p + i;
            cmp = less_than(key, *pKey_i);
            if ((!cmp) && (!less_than(*pKey_i, key))) return i;
            m >>= 1;
            if (!m) break;
            cmp = -cmp;
            i += (((m + 1) >> 1) ^ cmp) - cmp;
         }
      }

      return cInvalidIndex;
   }

   // Number of elements equal to key (linear scan).
   // (Name kept as-is -- "occurences" [sic] is the public interface.)
   inline uint count_occurences(const T& key) const
   {
      uint c = 0;

      const T* p = m_p;
      const T* p_end = m_p + m_size;

      while (p != p_end)
      {
         if (key == *p)
            c++;

         p++;
      }

      return c;
   }

   // Set every element to o; memset fast path for byte-sized trivial types.
   inline void set_all(const T& o)
   {
      if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
         memset(m_p, *reinterpret_cast<const uint8*>(&o), m_size);
      else
      {
         T* pDst = m_p;
         T* pDst_end = pDst + m_size;
         while (pDst != pDst_end)
            *pDst++ = o;
      }
   }

private:
   T* m_p;           // element storage (NULL when capacity is 0)
   uint m_size;      // live element count
   uint m_capacity;  // allocated element count

   // Compile-time test for "T is itself a lzham::vector"; nested vectors are
   // treated as bitwise-movable in increase_capacity() below.
   template<typename Q> struct is_vector { enum { cFlag = false }; };
   template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };

   // elemental_vector relocation callback: copy-construct each element into
   // the new block, then destruct the original.
   static void object_mover(void* pDst_void, void* pSrc_void, uint num)
   {
      T* pSrc = static_cast<T*>(pSrc_void);
      T* const pSrc_end = pSrc + num;
      T* pDst = static_cast<T*>(pDst_void);

      while (pSrc != pSrc_end)
      {
         new (static_cast<void*>(pDst)) T(*pSrc);
         pSrc->~T();
         pSrc++;
         pDst++;
      }
   }

   // Forward to the shared, non-template growth code in lzham_vector.cpp.
   // Safe only because this class is layout-compatible with elemental_vector.
   inline bool increase_capacity(uint min_new_capacity, bool grow_hint, bool nofail = false)
   {
      return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
         min_new_capacity, grow_hint, sizeof(T),
         (LZHAM_IS_BITWISE_MOVABLE(T) || (is_vector<T>::cFlag)) ? NULL : object_mover, nofail);
   }
};
576
+
577
// A vector<T>'s only state is a pointer and two uints, so vectors themselves
// may be relocated bitwise (e.g. inside a vector< vector<T> >).
template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };
578
+
579
+ extern void vector_test();
580
+
581
// Free-function overload so unqualified swap(a, b) in generic code picks the
// O(1) pointer-exchanging member implementation instead of a copy-based swap.
template<typename T>
inline void swap(vector<T>& a, vector<T>& b)
{
   a.swap(b);
}
586
+
587
+ } // namespace lzham
588
+