google_hash 0.8.1 → 0.8.2

Files changed (121)
  1. data/ChangeLog.txt +2 -0
  2. data/VERSION +1 -1
  3. data/ext/clean.bat +0 -0
  4. data/ext/clean.sh +4 -0
  5. data/ext/extconf.rb +4 -5
  6. data/ext/go.bat +0 -0
  7. data/ext/sparsehash-2.0.2/AUTHORS +2 -0
  8. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/COPYING +0 -0
  9. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/ChangeLog +60 -0
  10. data/ext/sparsehash-2.0.2/INSTALL +365 -0
  11. data/ext/sparsehash-2.0.2/Makefile +1336 -0
  12. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/Makefile.am +97 -40
  13. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/Makefile.in +538 -256
  14. data/ext/sparsehash-2.0.2/NEWS +188 -0
  15. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/README +4 -10
  16. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/README_windows.txt +3 -3
  17. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/TODO +0 -0
  18. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/aclocal.m4 +266 -166
  19. data/ext/sparsehash-2.0.2/allocator.patch +31 -0
  20. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/config.guess +235 -234
  21. data/ext/sparsehash-2.0.2/config.status +1238 -0
  22. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/config.sub +198 -64
  23. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/configure +1118 -1000
  24. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/configure.ac +4 -5
  25. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/depcomp +136 -36
  26. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/dense_hash_map.html +182 -67
  27. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/dense_hash_set.html +173 -74
  28. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/designstyle.css +0 -6
  29. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/implementation.html +0 -0
  30. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/index.html +4 -5
  31. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/performance.html +1 -1
  32. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/sparse_hash_map.html +190 -58
  33. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/sparse_hash_set.html +180 -65
  34. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/doc/sparsetable.html +1 -1
  35. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/experimental/Makefile +0 -0
  36. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/experimental/README +0 -0
  37. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/experimental/example.c +1 -0
  38. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/experimental/libchash.c +1 -0
  39. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/experimental/libchash.h +1 -0
  40. data/ext/sparsehash-2.0.2/install-sh +520 -0
  41. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/m4/acx_pthread.m4 +34 -0
  42. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/m4/google_namespace.m4 +0 -0
  43. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/m4/namespaces.m4 +0 -0
  44. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/m4/stl_hash.m4 +0 -0
  45. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/m4/stl_hash_fun.m4 +0 -0
  46. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/missing +60 -44
  47. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb.sh +0 -0
  48. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/README +0 -0
  49. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/changelog +42 -0
  50. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/compat +0 -0
  51. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/control +1 -1
  52. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/copyright +5 -4
  53. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/docs +0 -0
  54. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/deb/rules +0 -0
  55. data/ext/sparsehash-2.0.2/packages/deb/sparsehash.dirs +5 -0
  56. data/ext/sparsehash-2.0.2/packages/deb/sparsehash.install +6 -0
  57. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/rpm.sh +1 -1
  58. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/packages/rpm/rpm.spec +5 -3
  59. data/ext/{sparsehash-1.8.1/google-sparsehash.sln → sparsehash-2.0.2/sparsehash.sln} +0 -0
  60. data/ext/sparsehash-2.0.2/src/config.h +132 -0
  61. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/config.h.in +0 -3
  62. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/config.h.include +0 -1
  63. data/ext/sparsehash-2.0.2/src/google/dense_hash_map +34 -0
  64. data/ext/sparsehash-2.0.2/src/google/dense_hash_set +34 -0
  65. data/ext/sparsehash-2.0.2/src/google/sparse_hash_map +34 -0
  66. data/ext/sparsehash-2.0.2/src/google/sparse_hash_set +34 -0
  67. data/ext/sparsehash-2.0.2/src/google/sparsehash/densehashtable.h +34 -0
  68. data/ext/sparsehash-2.0.2/src/google/sparsehash/hashtable-common.h +34 -0
  69. data/ext/sparsehash-2.0.2/src/google/sparsehash/libc_allocator_with_realloc.h +34 -0
  70. data/ext/sparsehash-2.0.2/src/google/sparsehash/sparsehashtable.h +34 -0
  71. data/ext/sparsehash-2.0.2/src/google/sparsetable +34 -0
  72. data/ext/sparsehash-2.0.2/src/google/template_util.h +34 -0
  73. data/ext/sparsehash-2.0.2/src/google/type_traits.h +34 -0
  74. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/hash_test_interface.h +64 -37
  75. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/hashtable_test.cc +415 -141
  76. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/libc_allocator_with_realloc_test.cc +16 -23
  77. data/ext/sparsehash-2.0.2/src/simple_compat_test.cc +106 -0
  78. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/simple_test.cc +8 -5
  79. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/dense_hash_map +80 -37
  80. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/dense_hash_set +64 -34
  81. data/ext/{sparsehash-1.8.1/src/google/sparsehash → sparsehash-2.0.2/src/sparsehash/internal}/densehashtable.h +247 -173
  82. data/ext/sparsehash-2.0.2/src/sparsehash/internal/hashtable-common.h +381 -0
  83. data/ext/{sparsehash-1.8.1/src/google/sparsehash → sparsehash-2.0.2/src/sparsehash/internal}/libc_allocator_with_realloc.h +5 -7
  84. data/ext/{sparsehash-1.8.1/src/google/sparsehash → sparsehash-2.0.2/src/sparsehash/internal}/sparsehashtable.h +154 -93
  85. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/sparse_hash_map +96 -36
  86. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/sparse_hash_set +85 -32
  87. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/sparsetable +520 -258
  88. data/ext/sparsehash-2.0.2/src/sparsehash/template_util.h +134 -0
  89. data/ext/{sparsehash-1.8.1/src/google → sparsehash-2.0.2/src/sparsehash}/type_traits.h +153 -35
  90. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/sparsetable_unittest.cc +108 -22
  91. data/ext/sparsehash-2.0.2/src/stamp-h1 +1 -0
  92. data/ext/sparsehash-2.0.2/src/template_util_unittest.cc +134 -0
  93. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/testutil.h +16 -1
  94. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/time_hash_map.cc +259 -94
  95. data/ext/sparsehash-2.0.2/src/type_traits_unittest.cc +636 -0
  96. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/windows/config.h +4 -4
  97. data/ext/sparsehash-2.0.2/src/windows/google/sparsehash/sparseconfig.h +49 -0
  98. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/windows/port.cc +1 -0
  99. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/src/windows/port.h +4 -13
  100. data/ext/sparsehash-2.0.2/src/windows/sparsehash/internal/sparseconfig.h +49 -0
  101. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/vsprojects/hashtable_test/hashtable_test.vcproj +11 -11
  102. data/ext/sparsehash-2.0.2/vsprojects/libc_allocator_with_realloc_test/libc_allocator_with_realloc_test.vcproj +161 -0
  103. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/vsprojects/simple_test/simple_test.vcproj +10 -10
  104. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj +4 -4
  105. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/vsprojects/time_hash_map/time_hash_map.vcproj +10 -10
  106. data/ext/{sparsehash-1.8.1 → sparsehash-2.0.2}/vsprojects/type_traits_unittest/type_traits_unittest.vcproj +3 -3
  107. data/ext/spec.bat +0 -0
  108. data/ext/template/google_hash.cpp.erb +6 -5
  109. metadata +106 -86
  110. data/ext/sparsehash-1.8.1/AUTHORS +0 -2
  111. data/ext/sparsehash-1.8.1/INSTALL +0 -236
  112. data/ext/sparsehash-1.8.1/NEWS +0 -71
  113. data/ext/sparsehash-1.8.1/compile +0 -99
  114. data/ext/sparsehash-1.8.1/install-sh +0 -323
  115. data/ext/sparsehash-1.8.1/m4/stl_namespace.m4 +0 -25
  116. data/ext/sparsehash-1.8.1/mkinstalldirs +0 -158
  117. data/ext/sparsehash-1.8.1/packages/deb/sparsehash.dirs +0 -2
  118. data/ext/sparsehash-1.8.1/packages/deb/sparsehash.install +0 -2
  119. data/ext/sparsehash-1.8.1/src/google/sparsehash/hashtable-common.h +0 -178
  120. data/ext/sparsehash-1.8.1/src/type_traits_unittest.cc +0 -502
  121. data/ext/sparsehash-1.8.1/src/windows/google/sparsehash/sparseconfig.h +0 -32
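The bulk of this release is the bundled sparsehash upgrade from 1.8.1 to 2.0.2, which moves the public headers out of <google/...> and into <sparsehash/...> (the new src/google/* files listed above appear to be thin compatibility forwarders, exercised by simple_compat_test.cc). A minimal sketch of the include-path migration in user code (hypothetical example, not part of this gem):

    // sparsehash 1.8.1 spelling (still accepted via the compat headers):
    //   #include <google/sparse_hash_map>
    // sparsehash 2.0.2 spelling:
    #include <sparsehash/sparse_hash_map>
    #include <utility>

    int main() {
      google::sparse_hash_map<int, int> counts;     // default namespace is still google::
      counts.insert(std::make_pair(7, 1));
      return counts.size() == 1 ? 0 : 1;
    }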
@@ -1,10 +1,10 @@
1
1
  // Copyright (c) 2005, Google Inc.
2
2
  // All rights reserved.
3
- //
3
+ //
4
4
  // Redistribution and use in source and binary forms, with or without
5
5
  // modification, are permitted provided that the following conditions are
6
6
  // met:
7
- //
7
+ //
8
8
  // * Redistributions of source code must retain the above copyright
9
9
  // notice, this list of conditions and the following disclaimer.
10
10
  // * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
14
14
  // * Neither the name of Google Inc. nor the names of its
15
15
  // contributors may be used to endorse or promote products derived from
16
16
  // this software without specific prior written permission.
17
- //
17
+ //
18
18
  // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
19
  // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
20
  // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -28,7 +28,6 @@
28
28
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
29
 
30
30
  // ---
31
- // Author: Craig Silverstein
32
31
  //
33
32
  // This is just a very thin wrapper over sparsehashtable.h, just
34
33
  // like sgi stl's stl_hash_map is a very thin wrapper over
@@ -85,35 +84,31 @@
85
84
  #ifndef _SPARSE_HASH_MAP_H_
86
85
  #define _SPARSE_HASH_MAP_H_
87
86
 
88
- #include <google/sparsehash/sparseconfig.h>
89
- #include <stdio.h> // for FILE * in read()/write()
90
- #include <algorithm> // for the default template args
91
- #include <functional> // for equal_to
92
- #include <memory> // for alloc<>
93
- #include <utility> // for pair<>
94
- #include HASH_FUN_H // defined in config.h
95
- #include <google/sparsehash/libc_allocator_with_realloc.h>
96
- #include <google/sparsehash/sparsehashtable.h>
97
-
98
-
87
+ #include <sparsehash/internal/sparseconfig.h>
88
+ #include <algorithm> // needed by stl_alloc
89
+ #include <functional> // for equal_to<>, select1st<>, etc
90
+ #include <memory> // for alloc
91
+ #include <utility> // for pair<>
92
+ #include <sparsehash/internal/libc_allocator_with_realloc.h>
93
+ #include <sparsehash/internal/sparsehashtable.h> // IWYU pragma: export
94
+ #include HASH_FUN_H // for hash<>
99
95
  _START_GOOGLE_NAMESPACE_
100
96
 
101
- using STL_NAMESPACE::pair;
102
-
103
97
  template <class Key, class T,
104
98
  class HashFcn = SPARSEHASH_HASH<Key>, // defined in sparseconfig.h
105
- class EqualKey = STL_NAMESPACE::equal_to<Key>,
106
- class Alloc = libc_allocator_with_realloc<pair<const Key, T> > >
99
+ class EqualKey = std::equal_to<Key>,
100
+ class Alloc = libc_allocator_with_realloc<std::pair<const Key, T> > >
107
101
  class sparse_hash_map {
108
102
  private:
109
103
  // Apparently select1st is not stl-standard, so we define our own
110
104
  struct SelectKey {
111
- const Key& operator()(const pair<const Key, T>& p) const {
105
+ typedef const Key& result_type;
106
+ const Key& operator()(const std::pair<const Key, T>& p) const {
112
107
  return p.first;
113
108
  }
114
109
  };
115
110
  struct SetKey {
116
- void operator()(pair<const Key, T>* value, const Key& new_key) const {
111
+ void operator()(std::pair<const Key, T>* value, const Key& new_key) const {
117
112
  *const_cast<Key*>(&value->first) = new_key;
118
113
  // It would be nice to clear the rest of value here as well, in
119
114
  // case it's taking up a lot of memory. We do this by clearing
@@ -121,9 +116,15 @@ class sparse_hash_map {
121
116
  value->second = T();
122
117
  }
123
118
  };
119
+ // For operator[].
120
+ struct DefaultValue {
121
+ std::pair<const Key, T> operator()(const Key& key) {
122
+ return std::make_pair(key, T());
123
+ }
124
+ };
124
125
 
125
126
  // The actual data
126
- typedef sparse_hashtable<pair<const Key, T>, Key, HashFcn, SelectKey,
127
+ typedef sparse_hashtable<std::pair<const Key, T>, Key, HashFcn, SelectKey,
127
128
  SetKey, EqualKey, Alloc> ht;
128
129
  ht rep;
129
130
 
@@ -242,26 +243,33 @@ class sparse_hash_map {
242
243
  // If key is in the hashtable, returns find(key)->second,
243
244
  // otherwise returns insert(value_type(key, T()).first->second.
244
245
  // Note it does not create an empty T unless the find fails.
245
- return rep.template find_or_insert<data_type>(key);
246
+ return rep.template find_or_insert<DefaultValue>(key).second;
246
247
  }
247
248
 
248
249
  size_type count(const key_type& key) const { return rep.count(key); }
249
250
 
250
- pair<iterator, iterator> equal_range(const key_type& key) {
251
+ std::pair<iterator, iterator> equal_range(const key_type& key) {
251
252
  return rep.equal_range(key);
252
253
  }
253
- pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
254
+ std::pair<const_iterator, const_iterator> equal_range(const key_type& key)
255
+ const {
254
256
  return rep.equal_range(key);
255
257
  }
256
258
 
257
259
  // Insertion routines
258
- pair<iterator, bool> insert(const value_type& obj) { return rep.insert(obj); }
259
- template <class InputIterator>
260
- void insert(InputIterator f, InputIterator l) { rep.insert(f, l); }
261
- void insert(const_iterator f, const_iterator l) { rep.insert(f, l); }
262
- // required for std::insert_iterator; the passed-in iterator is ignored
263
- iterator insert(iterator, const value_type& obj) { return insert(obj).first; }
264
-
260
+ std::pair<iterator, bool> insert(const value_type& obj) {
261
+ return rep.insert(obj);
262
+ }
263
+ template <class InputIterator> void insert(InputIterator f, InputIterator l) {
264
+ rep.insert(f, l);
265
+ }
266
+ void insert(const_iterator f, const_iterator l) {
267
+ rep.insert(f, l);
268
+ }
269
+ // Required for std::insert_iterator; the passed-in iterator is ignored.
270
+ iterator insert(iterator, const value_type& obj) {
271
+ return insert(obj).first;
272
+ }
265
273
 
266
274
  // Deletion routines
267
275
  // THESE ARE NON-STANDARD! I make you specify an "impossible" key
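The operator[] rewrite in this hunk now routes through find_or_insert with the new DefaultValue functor; behaviourally it is still the usual map contract described in the comment above (a quick sketch, assuming the surrounding sparse_hash_map API):

    #include <assert.h>
    #include <sparsehash/sparse_hash_map>

    int main() {
      google::sparse_hash_map<int, int> m;
      m[7] += 1;               // key absent: a default-constructed int (0) is inserted first
      assert(m[7] == 1);
      assert(m.size() == 1);   // the miss on the first lookup created exactly one entry
      return 0;
    }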
@@ -285,10 +293,62 @@ class sparse_hash_map {
285
293
 
286
294
 
287
295
  // I/O -- this is an add-on for writing metainformation to disk
288
- bool write_metadata(FILE *fp) { return rep.write_metadata(fp); }
289
- bool read_metadata(FILE *fp) { return rep.read_metadata(fp); }
290
- bool write_nopointer_data(FILE *fp) { return rep.write_nopointer_data(fp); }
291
- bool read_nopointer_data(FILE *fp) { return rep.read_nopointer_data(fp); }
296
+ //
297
+ // For maximum flexibility, this does not assume a particular
298
+ // file type (though it will probably be a FILE *). We just pass
299
+ // the fp through to rep.
300
+
301
+ // If your keys and values are simple enough, you can pass this
302
+ // serializer to serialize()/unserialize(). "Simple enough" means
303
+ // value_type is a POD type that contains no pointers. Note,
304
+ // however, we don't try to normalize endianness.
305
+ typedef typename ht::NopointerSerializer NopointerSerializer;
306
+
307
+ // serializer: a class providing operator()(OUTPUT*, const value_type&)
308
+ // (writing value_type to OUTPUT). You can specify a
309
+ // NopointerSerializer object if appropriate (see above).
310
+ // fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
311
+ // pointer to a class providing size_t Write(const void*, size_t),
312
+ // which writes a buffer into a stream (which fp presumably
313
+ // owns) and returns the number of bytes successfully written.
314
+ // Note basic_ostream<not_char> is not currently supported.
315
+ template <typename ValueSerializer, typename OUTPUT>
316
+ bool serialize(ValueSerializer serializer, OUTPUT* fp) {
317
+ return rep.serialize(serializer, fp);
318
+ }
319
+
320
+ // serializer: a functor providing operator()(INPUT*, value_type*)
321
+ // (reading from INPUT and into value_type). You can specify a
322
+ // NopointerSerializer object if appropriate (see above).
323
+ // fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
324
+ // pointer to a class providing size_t Read(void*, size_t),
325
+ // which reads into a buffer from a stream (which fp presumably
326
+ // owns) and returns the number of bytes successfully read.
327
+ // Note basic_istream<not_char> is not currently supported.
328
+ // NOTE: Since value_type is std::pair<const Key, T>, ValueSerializer
329
+ // may need to do a const cast in order to fill in the key.
330
+ // NOTE: if Key or T are not POD types, the serializer MUST use
331
+ // placement-new to initialize their values, rather than a normal
332
+ // equals-assignment or similar. (The value_type* passed into the
333
+ // serializer points to garbage memory.)
334
+ template <typename ValueSerializer, typename INPUT>
335
+ bool unserialize(ValueSerializer serializer, INPUT* fp) {
336
+ return rep.unserialize(serializer, fp);
337
+ }
338
+
339
+ // The four methods below are DEPRECATED.
340
+ // Use serialize() and unserialize() for new code.
341
+ template <typename OUTPUT>
342
+ bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); }
343
+
344
+ template <typename INPUT>
345
+ bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); }
346
+
347
+ template <typename OUTPUT>
348
+ bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); }
349
+
350
+ template <typename INPUT>
351
+ bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); }
292
352
  };
293
353
 
294
354
  // We need a global swap as well
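The FILE*-only read/write_metadata methods of 1.8.1 are now deprecated in favour of the templated serialize()/unserialize() pair documented in the hunk above. A usage sketch under the assumptions stated in those comments (value_type is a pointer-free POD, the stream is a FILE*, and NopointerSerializer is reachable as a public member typedef):

    #include <stdio.h>
    #include <sparsehash/sparse_hash_map>

    typedef google::sparse_hash_map<int, int> Map;

    bool save(Map& m, const char* path) {            // serialize() is non-const in this release
      FILE* fp = fopen(path, "wb");
      if (!fp) return false;
      bool ok = m.serialize(Map::NopointerSerializer(), fp);
      fclose(fp);
      return ok;
    }

    bool load(Map* m, const char* path) {
      FILE* fp = fopen(path, "rb");
      if (!fp) return false;
      bool ok = m->unserialize(Map::NopointerSerializer(), fp);
      fclose(fp);
      return ok;
    }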
@@ -1,10 +1,10 @@
1
1
  // Copyright (c) 2005, Google Inc.
2
2
  // All rights reserved.
3
- //
3
+ //
4
4
  // Redistribution and use in source and binary forms, with or without
5
5
  // modification, are permitted provided that the following conditions are
6
6
  // met:
7
- //
7
+ //
8
8
  // * Redistributions of source code must retain the above copyright
9
9
  // notice, this list of conditions and the following disclaimer.
10
10
  // * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
14
14
  // * Neither the name of Google Inc. nor the names of its
15
15
  // contributors may be used to endorse or promote products derived from
16
16
  // this software without specific prior written permission.
17
- //
17
+ //
18
18
  // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
19
  // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
20
  // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -28,7 +28,6 @@
28
28
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
29
 
30
30
  // ---
31
- // Author: Craig Silverstein
32
31
  //
33
32
  // This is just a very thin wrapper over sparsehashtable.h, just
34
33
  // like sgi stl's stl_hash_set is a very thin wrapper over
@@ -89,29 +88,26 @@
89
88
  #ifndef _SPARSE_HASH_SET_H_
90
89
  #define _SPARSE_HASH_SET_H_
91
90
 
92
- #include <google/sparsehash/sparseconfig.h>
93
- #include <stdio.h> // for FILE * in read()/write()
94
- #include <algorithm> // for the default template args
95
- #include <functional> // for equal_to
96
- #include <memory> // for alloc<>
97
- #include <utility> // for pair<>
98
- #include HASH_FUN_H // defined in config.h
99
- #include <google/sparsehash/libc_allocator_with_realloc.h>
100
- #include <google/sparsehash/sparsehashtable.h>
91
+ #include <sparsehash/internal/sparseconfig.h>
92
+ #include <algorithm> // needed by stl_alloc
93
+ #include <functional> // for equal_to<>
94
+ #include <memory> // for alloc (which we don't use)
95
+ #include <utility> // for pair<>
96
+ #include <sparsehash/internal/libc_allocator_with_realloc.h>
97
+ #include <sparsehash/internal/sparsehashtable.h> // IWYU pragma: export
98
+ #include HASH_FUN_H // for hash<>
101
99
 
102
100
  _START_GOOGLE_NAMESPACE_
103
101
 
104
- using STL_NAMESPACE::pair;
105
-
106
102
  template <class Value,
107
- class HashFcn = SPARSEHASH_HASH<Value>, // defined in sparseconfig.h
108
- class EqualKey = STL_NAMESPACE::equal_to<Value>,
103
+ class HashFcn = SPARSEHASH_HASH<Value>, // defined in sparseconfig.h
104
+ class EqualKey = std::equal_to<Value>,
109
105
  class Alloc = libc_allocator_with_realloc<Value> >
110
106
  class sparse_hash_set {
111
107
  private:
112
108
  // Apparently identity is not stl-standard, so we define our own
113
109
  struct Identity {
114
- Value& operator()(Value& v) const { return v; }
110
+ typedef const Value& result_type;
115
111
  const Value& operator()(const Value& v) const { return v; }
116
112
  };
117
113
  struct SetKey {
@@ -232,21 +228,26 @@ class sparse_hash_set {
232
228
 
233
229
  size_type count(const key_type& key) const { return rep.count(key); }
234
230
 
235
- pair<iterator, iterator> equal_range(const key_type& key) const {
231
+ std::pair<iterator, iterator> equal_range(const key_type& key) const {
236
232
  return rep.equal_range(key);
237
233
  }
238
234
 
235
+
239
236
  // Insertion routines
240
- pair<iterator, bool> insert(const value_type& obj) {
241
- pair<typename ht::iterator, bool> p = rep.insert(obj);
242
- return pair<iterator, bool>(p.first, p.second); // const to non-const
237
+ std::pair<iterator, bool> insert(const value_type& obj) {
238
+ std::pair<typename ht::iterator, bool> p = rep.insert(obj);
239
+ return std::pair<iterator, bool>(p.first, p.second); // const to non-const
240
+ }
241
+ template <class InputIterator> void insert(InputIterator f, InputIterator l) {
242
+ rep.insert(f, l);
243
+ }
244
+ void insert(const_iterator f, const_iterator l) {
245
+ rep.insert(f, l);
246
+ }
247
+ // Required for std::insert_iterator; the passed-in iterator is ignored.
248
+ iterator insert(iterator, const value_type& obj) {
249
+ return insert(obj).first;
243
250
  }
244
- template <class InputIterator>
245
- void insert(InputIterator f, InputIterator l) { rep.insert(f, l); }
246
- void insert(const_iterator f, const_iterator l) { rep.insert(f, l); }
247
- // required for std::insert_iterator; the passed-in iterator is ignored
248
- iterator insert(iterator, const value_type& obj) { return insert(obj).first; }
249
-
250
251
 
251
252
  // Deletion routines
252
253
  // THESE ARE NON-STANDARD! I make you specify an "impossible" key
@@ -268,10 +269,62 @@ class sparse_hash_set {
268
269
 
269
270
 
270
271
  // I/O -- this is an add-on for writing metainformation to disk
271
- bool write_metadata(FILE *fp) { return rep.write_metadata(fp); }
272
- bool read_metadata(FILE *fp) { return rep.read_metadata(fp); }
273
- bool write_nopointer_data(FILE *fp) { return rep.write_nopointer_data(fp); }
274
- bool read_nopointer_data(FILE *fp) { return rep.read_nopointer_data(fp); }
272
+ //
273
+ // For maximum flexibility, this does not assume a particular
274
+ // file type (though it will probably be a FILE *). We just pass
275
+ // the fp through to rep.
276
+
277
+ // If your keys and values are simple enough, you can pass this
278
+ // serializer to serialize()/unserialize(). "Simple enough" means
279
+ // value_type is a POD type that contains no pointers. Note,
280
+ // however, we don't try to normalize endianness.
281
+ typedef typename ht::NopointerSerializer NopointerSerializer;
282
+
283
+ // serializer: a class providing operator()(OUTPUT*, const value_type&)
284
+ // (writing value_type to OUTPUT). You can specify a
285
+ // NopointerSerializer object if appropriate (see above).
286
+ // fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
287
+ // pointer to a class providing size_t Write(const void*, size_t),
288
+ // which writes a buffer into a stream (which fp presumably
289
+ // owns) and returns the number of bytes successfully written.
290
+ // Note basic_ostream<not_char> is not currently supported.
291
+ template <typename ValueSerializer, typename OUTPUT>
292
+ bool serialize(ValueSerializer serializer, OUTPUT* fp) {
293
+ return rep.serialize(serializer, fp);
294
+ }
295
+
296
+ // serializer: a functor providing operator()(INPUT*, value_type*)
297
+ // (reading from INPUT and into value_type). You can specify a
298
+ // NopointerSerializer object if appropriate (see above).
299
+ // fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
300
+ // pointer to a class providing size_t Read(void*, size_t),
301
+ // which reads into a buffer from a stream (which fp presumably
302
+ // owns) and returns the number of bytes successfully read.
303
+ // Note basic_istream<not_char> is not currently supported.
304
+ // NOTE: Since value_type is const Key, ValueSerializer
305
+ // may need to do a const cast in order to fill in the key.
306
+ // NOTE: if Key is not a POD type, the serializer MUST use
307
+ // placement-new to initialize its value, rather than a normal
308
+ // equals-assignment or similar. (The value_type* passed into
309
+ // the serializer points to garbage memory.)
310
+ template <typename ValueSerializer, typename INPUT>
311
+ bool unserialize(ValueSerializer serializer, INPUT* fp) {
312
+ return rep.unserialize(serializer, fp);
313
+ }
314
+
315
+ // The four methods below are DEPRECATED.
316
+ // Use serialize() and unserialize() for new code.
317
+ template <typename OUTPUT>
318
+ bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); }
319
+
320
+ template <typename INPUT>
321
+ bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); }
322
+
323
+ template <typename OUTPUT>
324
+ bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); }
325
+
326
+ template <typename INPUT>
327
+ bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); }
275
328
  };
276
329
 
277
330
  template <class Val, class HashFcn, class EqualKey, class Alloc>
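sparse_hash_set receives the same std::pair, insert and serialization treatment as the map. One behavioural point carried over from 1.8.1 (the "THESE ARE NON-STANDARD" comment above): erase() only works after a never-used "deleted key" has been declared. A small sketch, assuming the usual set_deleted_key() member (not shown in this diff):

    #include <sparsehash/sparse_hash_set>

    int main() {
      google::sparse_hash_set<int> s;
      s.set_deleted_key(-1);          // assumption: -1 never occurs as real data
      s.insert(42);
      s.erase(42);
      return s.count(42) == 0 ? 0 : 1;
    }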
@@ -1,10 +1,10 @@
1
1
  // Copyright (c) 2005, Google Inc.
2
2
  // All rights reserved.
3
- //
3
+ //
4
4
  // Redistribution and use in source and binary forms, with or without
5
5
  // modification, are permitted provided that the following conditions are
6
6
  // met:
7
- //
7
+ //
8
8
  // * Redistributions of source code must retain the above copyright
9
9
  // notice, this list of conditions and the following disclaimer.
10
10
  // * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
14
14
  // * Neither the name of Google Inc. nor the names of its
15
15
  // contributors may be used to endorse or promote products derived from
16
16
  // this software without specific prior written permission.
17
- //
17
+ //
18
18
  // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
19
  // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
20
  // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -28,7 +28,7 @@
28
28
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
29
 
30
30
  // ---
31
- // Author: Craig Silverstein
31
+ //
32
32
  //
33
33
  // A sparsetable is a random container that implements a sparse array,
34
34
  // that is, an array that uses very little memory to store unassigned
@@ -50,39 +50,203 @@
50
50
  // because of this container's memory economy, each insert and delete
51
51
  // causes a memory reallocation.
52
52
  //
53
- // See doc/sparsetable.html for information about how to use this class.
53
+ // NOTE: You should not test(), get(), or set() any index that is
54
+ // greater than sparsetable.size(). If you need to do that, call
55
+ // resize() first.
56
+ //
57
+ // --- Template parameters
58
+ // PARAMETER DESCRIPTION DEFAULT
59
+ // T The value of the array: the type of --
60
+ // object that is stored in the array.
61
+ //
62
+ // GROUP_SIZE How large each "group" in the table 48
63
+ // is (see below). Larger values use
64
+ // a little less memory but cause most
65
+ // operations to be a little slower
66
+ //
67
+ // Alloc: Allocator to use to allocate memory. libc_allocator_with_realloc
68
+ //
69
+ // --- Model of
70
+ // Random Access Container
71
+ //
72
+ // --- Type requirements
73
+ // T must be Copy Constructible. It need not be Assignable.
74
+ //
75
+ // --- Public base classes
76
+ // None.
77
+ //
78
+ // --- Members
79
+ // Type members
80
+ //
81
+ // MEMBER WHERE DEFINED DESCRIPTION
82
+ // value_type container The type of object, T, stored in the array
83
+ // allocator_type container Allocator to use
84
+ // pointer container Pointer to p
85
+ // const_pointer container Const pointer to p
86
+ // reference container Reference to t
87
+ // const_reference container Const reference to t
88
+ // size_type container An unsigned integral type
89
+ // difference_type container A signed integral type
90
+ // iterator [*] container Iterator used to iterate over a sparsetable
91
+ // const_iterator container Const iterator used to iterate over a table
92
+ // reverse_iterator reversible Iterator used to iterate backwards over
93
+ // container a sparsetable
94
+ // const_reverse_iterator reversible container Guess
95
+ // nonempty_iterator [+] sparsetable Iterates over assigned
96
+ // array elements only
97
+ // const_nonempty_iterator sparsetable Iterates over assigned
98
+ // array elements only
99
+ // reverse_nonempty_iterator sparsetable Iterates backwards over
100
+ // assigned array elements only
101
+ // const_reverse_nonempty_iterator sparsetable Iterates backwards over
102
+ // assigned array elements only
103
+ //
104
+ // [*] All iterators are const in a sparsetable (though nonempty_iterators
105
+ // may not be). Use get() and set() to assign values, not iterators.
106
+ //
107
+ // [+] iterators are random-access iterators. nonempty_iterators are
108
+ // bidirectional iterators.
54
109
 
55
- #ifndef _SPARSETABLE_H_
56
- #define _SPARSETABLE_H_
110
+ // Iterator members
111
+ // MEMBER WHERE DEFINED DESCRIPTION
112
+ //
113
+ // iterator begin() container An iterator to the beginning of the table
114
+ // iterator end() container An iterator to the end of the table
115
+ // const_iterator container A const_iterator pointing to the
116
+ // begin() const beginning of a sparsetable
117
+ // const_iterator container A const_iterator pointing to the
118
+ // end() const end of a sparsetable
119
+ //
120
+ // reverse_iterator reversable Points to beginning of a reversed
121
+ // rbegin() container sparsetable
122
+ // reverse_iterator reversable Points to end of a reversed table
123
+ // rend() container
124
+ // const_reverse_iterator reversable Points to beginning of a
125
+ // rbegin() const container reversed sparsetable
126
+ // const_reverse_iterator reversable Points to end of a reversed table
127
+ // rend() const container
128
+ //
129
+ // nonempty_iterator sparsetable Points to first assigned element
130
+ // begin() of a sparsetable
131
+ // nonempty_iterator sparsetable Points past last assigned element
132
+ // end() of a sparsetable
133
+ // const_nonempty_iterator sparsetable Points to first assigned element
134
+ // begin() const of a sparsetable
135
+ // const_nonempty_iterator sparsetable Points past last assigned element
136
+ // end() const of a sparsetable
137
+ //
138
+ // reverse_nonempty_iterator sparsetable Points to first assigned element
139
+ // begin() of a reversed sparsetable
140
+ // reverse_nonempty_iterator sparsetable Points past last assigned element
141
+ // end() of a reversed sparsetable
142
+ // const_reverse_nonempty_iterator sparsetable Points to first assigned
143
+ // begin() const elt of a reversed sparsetable
144
+ // const_reverse_nonempty_iterator sparsetable Points past last assigned
145
+ // end() const elt of a reversed sparsetable
146
+ //
147
+ //
148
+ // Other members
149
+ // MEMBER WHERE DEFINED DESCRIPTION
150
+ // sparsetable() sparsetable A table of size 0; must resize()
151
+ // before using.
152
+ // sparsetable(size_type size) sparsetable A table of size size. All
153
+ // indices are unassigned.
154
+ // sparsetable(
155
+ // const sparsetable &tbl) sparsetable Copy constructor
156
+ // ~sparsetable() sparsetable The destructor
157
+ // sparsetable &operator=( sparsetable The assignment operator
158
+ // const sparsetable &tbl)
159
+ //
160
+ // void resize(size_type size) sparsetable Grow or shrink a table to
161
+ // have size indices [*]
162
+ //
163
+ // void swap(sparsetable &x) sparsetable Swap two sparsetables
164
+ // void swap(sparsetable &x, sparsetable Swap two sparsetables
165
+ // sparsetable &y) (global, not member, function)
166
+ //
167
+ // size_type size() const sparsetable Number of "buckets" in the table
168
+ // size_type max_size() const sparsetable Max allowed size of a sparsetable
169
+ // bool empty() const sparsetable true if size() == 0
170
+ // size_type num_nonempty() const sparsetable Number of assigned "buckets"
171
+ //
172
+ // const_reference get( sparsetable Value at index i, or default
173
+ // size_type i) const value if i is unassigned
174
+ // const_reference operator[]( sparsetable Identical to get(i) [+]
175
+ // difference_type i) const
176
+ // reference set(size_type i, sparsetable Set element at index i to
177
+ // const_reference val) be a copy of val
178
+ // bool test(size_type i) sparsetable True if element at index i
179
+ // const has been assigned to
180
+ // bool test(iterator pos) sparsetable True if element pointed to
181
+ // const by pos has been assigned to
182
+ // void erase(iterator pos) sparsetable Set element pointed to by
183
+ // pos to be unassigned [!]
184
+ // void erase(size_type i) sparsetable Set element i to be unassigned
185
+ // void erase(iterator start, sparsetable Erases all elements between
186
+ // iterator end) start and end
187
+ // void clear() sparsetable Erases all elements in the table
188
+ //
189
+ // I/O versions exist for both FILE* and for File* (Google2-style files):
190
+ // bool write_metadata(FILE *fp) sparsetable Writes a sparsetable to the
191
+ // bool write_metadata(File *fp) given file. true if write
192
+ // completes successfully
193
+ // bool read_metadata(FILE *fp) sparsetable Replaces sparsetable with
194
+ // bool read_metadata(File *fp) version read from fp. true
195
+ // if read completes sucessfully
196
+ // bool write_nopointer_data(FILE *fp) Read/write the data stored in
197
+ // bool read_nopointer_data(FILE*fp) the table, if it's simple
198
+ //
199
+ // bool operator==( forward Tests two tables for equality.
200
+ // const sparsetable &t1, container This is a global function,
201
+ // const sparsetable &t2) not a member function.
202
+ // bool operator<( forward Lexicographical comparison.
203
+ // const sparsetable &t1, container This is a global function,
204
+ // const sparsetable &t2) not a member function.
205
+ //
206
+ // [*] If you shrink a sparsetable using resize(), assigned elements
207
+ // past the end of the table are removed using erase(). If you grow
208
+ // a sparsetable, new unassigned indices are created.
209
+ //
210
+ // [+] Note that operator[] returns a const reference. You must use
211
+ // set() to change the value of a table element.
212
+ //
213
+ // [!] Unassignment also calls the destructor.
214
+ //
215
+ // Iterators are invalidated whenever an item is inserted or
216
+ // deleted (ie set() or erase() is used) or when the size of
217
+ // the table changes (ie resize() or clear() is used).
218
+ //
219
+ // See doc/sparsetable.html for more information about how to use this class.
220
+
221
+ // Note: this uses STL style for naming, rather than Google naming.
222
+ // That's because this is an STL-y container
223
+
224
+ #ifndef UTIL_GTL_SPARSETABLE_H_
225
+ #define UTIL_GTL_SPARSETABLE_H_
57
226
 
58
- #include <google/sparsehash/sparseconfig.h>
227
+ #include <sparsehash/internal/sparseconfig.h>
59
228
  #include <stdlib.h> // for malloc/free
60
229
  #include <stdio.h> // to read/write tables
230
+ #include <string.h> // for memcpy
61
231
  #ifdef HAVE_STDINT_H
62
- #include <stdint.h> // the normal place uint16_t is defined
232
+ #include <stdint.h> // the normal place uint16_t is defined
63
233
  #endif
64
234
  #ifdef HAVE_SYS_TYPES_H
65
- #include <sys/types.h> // the normal place u_int16_t is defined
235
+ #include <sys/types.h> // the normal place u_int16_t is defined
66
236
  #endif
67
237
  #ifdef HAVE_INTTYPES_H
68
- #include <inttypes.h> // a third place for uint16_t or u_int16_t
238
+ #include <inttypes.h> // a third place for uint16_t or u_int16_t
69
239
  #endif
70
240
  #include <assert.h> // for bounds checking
71
241
  #include <iterator> // to define reverse_iterator for me
72
242
  #include <algorithm> // equal, lexicographical_compare, swap,...
73
- #include <memory> // uninitialized_copy
243
+ #include <memory> // uninitialized_copy, uninitialized_fill
74
244
  #include <vector> // a sparsetable is a vector of groups
75
- #include <google/sparsehash/libc_allocator_with_realloc.h>
76
- #include <google/type_traits.h> // for true_type, integral_constant, etc.
77
-
78
- #if STDC_HEADERS
79
- #include <string.h> // for memcpy
80
- #else
81
- #if !HAVE_MEMCPY
82
- #define memcpy(d, s, n) bcopy ((s), (d), (n))
83
- #endif
84
- #endif
245
+ #include <sparsehash/type_traits.h>
246
+ #include <sparsehash/internal/hashtable-common.h>
247
+ #include <sparsehash/internal/libc_allocator_with_realloc.h>
85
248
 
249
+ // A lot of work to get a type that's guaranteed to be 16 bits...
86
250
  #ifndef HAVE_U_INT16_T
87
251
  # if defined HAVE_UINT16_T
88
252
  typedef uint16_t u_int16_t; // true on solaris, possibly other C99 libc's
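The new header comment above replaces the old pointer to doc/sparsetable.html with a full member table. Condensed into a usage sketch (hypothetical example following that table: resize() before use, set()/get()/test() for element access, operator[] as a read-only alias for get()):

    #include <sparsehash/sparsetable>

    int main() {
      google::sparsetable<int> t;
      t.resize(100);                  // 100 buckets, all unassigned
      t.set(10, 7);                   // assign index 10
      int a = t.get(10);              // 7
      int b = t[20];                  // unassigned: default value (0); use set() to write
      bool used = t.test(10);         // true once an index has been assigned
      t.erase(10);                    // destroys the element and marks it unassigned
      return (a == 7 && b == 0 && used && t.num_nonempty() == 0) ? 0 : 1;
    }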
@@ -98,8 +262,15 @@
98
262
 
99
263
  _START_GOOGLE_NAMESPACE_
100
264
 
101
- using STL_NAMESPACE::vector;
102
- using STL_NAMESPACE::uninitialized_copy;
265
+ namespace base { // just to make google->opensource transition easier
266
+ using GOOGLE_NAMESPACE::true_type;
267
+ using GOOGLE_NAMESPACE::false_type;
268
+ using GOOGLE_NAMESPACE::integral_constant;
269
+ using GOOGLE_NAMESPACE::has_trivial_copy;
270
+ using GOOGLE_NAMESPACE::has_trivial_destructor;
271
+ using GOOGLE_NAMESPACE::is_same;
272
+ }
273
+
103
274
 
104
275
  // The smaller this is, the faster lookup is (because the group bitmap is
105
276
  // smaller) and the faster insert is, because there's less to move.
@@ -108,6 +279,11 @@ using STL_NAMESPACE::uninitialized_copy;
108
279
  static const u_int16_t DEFAULT_SPARSEGROUP_SIZE = 48; // fits in 1.5 words
109
280
 
110
281
 
282
+ // Our iterator as simple as iterators can be: basically it's just
283
+ // the index into our table. Dereference, the only complicated
284
+ // thing, we punt to the table class. This just goes to show how
285
+ // much machinery STL requires to do even the most trivial tasks.
286
+ //
111
287
  // A NOTE ON ASSIGNING:
112
288
  // A sparse table does not actually allocate memory for entries
113
289
  // that are not filled. Because of this, it becomes complicated
@@ -157,7 +333,7 @@ class table_iterator {
157
333
  public:
158
334
  typedef table_iterator iterator;
159
335
 
160
- typedef STL_NAMESPACE::random_access_iterator_tag iterator_category;
336
+ typedef std::random_access_iterator_tag iterator_category;
161
337
  typedef typename tabletype::value_type value_type;
162
338
  typedef typename tabletype::difference_type difference_type;
163
339
  typedef typename tabletype::size_type size_type;
@@ -240,7 +416,7 @@ class const_table_iterator {
240
416
  typedef table_iterator<tabletype> iterator;
241
417
  typedef const_table_iterator const_iterator;
242
418
 
243
- typedef STL_NAMESPACE::random_access_iterator_tag iterator_category;
419
+ typedef std::random_access_iterator_tag iterator_category;
244
420
  typedef typename tabletype::value_type value_type;
245
421
  typedef typename tabletype::difference_type difference_type;
246
422
  typedef typename tabletype::size_type size_type;
@@ -351,7 +527,7 @@ class two_d_iterator {
351
527
  public:
352
528
  typedef two_d_iterator iterator;
353
529
 
354
- typedef STL_NAMESPACE::bidirectional_iterator_tag iterator_category;
530
+ typedef std::bidirectional_iterator_tag iterator_category;
355
531
  // apparently some versions of VC++ have trouble with two ::'s in a typename
356
532
  typedef typename containertype::value_type _tmp_vt;
357
533
  typedef typename _tmp_vt::value_type value_type;
@@ -442,7 +618,7 @@ class const_two_d_iterator {
442
618
  public:
443
619
  typedef const_two_d_iterator iterator;
444
620
 
445
- typedef STL_NAMESPACE::bidirectional_iterator_tag iterator_category;
621
+ typedef std::bidirectional_iterator_tag iterator_category;
446
622
  // apparently some versions of VC++ have trouble with two ::'s in a typename
447
623
  typedef typename containertype::value_type _tmp_vt;
448
624
  typedef typename _tmp_vt::value_type value_type;
@@ -529,7 +705,7 @@ class destructive_two_d_iterator {
529
705
  public:
530
706
  typedef destructive_two_d_iterator iterator;
531
707
 
532
- typedef STL_NAMESPACE::input_iterator_tag iterator_category;
708
+ typedef std::input_iterator_tag iterator_category;
533
709
  // apparently some versions of VC++ have trouble with two ::'s in a typename
534
710
  typedef typename containertype::value_type _tmp_vt;
535
711
  typedef typename _tmp_vt::value_type value_type;
@@ -623,22 +799,6 @@ class destructive_two_d_iterator {
623
799
  // the array (from 1 .. # of non-empty buckets in the group) is
624
800
  // called its "offset."
625
801
 
626
- // The weird mod in the offset is entirely to quiet compiler warnings
627
- // as is the cast to int after doing the "x mod 256"
628
- #define PUT_(take_from, offset) do { \
629
- if (putc(static_cast<int>(((take_from) >> ((offset) % (sizeof(take_from)*8)))\
630
- % 256), fp) \
631
- == EOF) \
632
- return false; \
633
- } while (0)
634
-
635
- #define GET_(add_to, offset) do { \
636
- if ((x=getc(fp)) == EOF) \
637
- return false; \
638
- else \
639
- add_to |= (static_cast<size_type>(x) << ((offset) % (sizeof(add_to)*8))); \
640
- } while (0)
641
-
642
802
  template <class T, u_int16_t GROUP_SIZE, class Alloc>
643
803
  class sparsegroup {
644
804
  private:
@@ -660,15 +820,15 @@ class sparsegroup {
660
820
  element_adaptor;
661
821
  typedef u_int16_t size_type; // max # of buckets
662
822
  typedef int16_t difference_type;
663
- typedef STL_NAMESPACE::reverse_iterator<const_iterator> const_reverse_iterator;
664
- typedef STL_NAMESPACE::reverse_iterator<iterator> reverse_iterator;
823
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
824
+ typedef std::reverse_iterator<iterator> reverse_iterator; // from iterator.h
665
825
 
666
826
  // These are our special iterators, that go over non-empty buckets in a
667
827
  // group. These aren't const-only because you can change non-empty bcks.
668
828
  typedef pointer nonempty_iterator;
669
829
  typedef const_pointer const_nonempty_iterator;
670
- typedef STL_NAMESPACE::reverse_iterator<nonempty_iterator> reverse_nonempty_iterator;
671
- typedef STL_NAMESPACE::reverse_iterator<const_nonempty_iterator> const_reverse_nonempty_iterator;
830
+ typedef std::reverse_iterator<nonempty_iterator> reverse_nonempty_iterator;
831
+ typedef std::reverse_iterator<const_nonempty_iterator> const_reverse_nonempty_iterator;
672
832
 
673
833
  // Iterator functions
674
834
  iterator begin() { return iterator(this, 0); }
@@ -683,8 +843,12 @@ class sparsegroup {
683
843
  // We'll have versions for our special non-empty iterator too
684
844
  nonempty_iterator nonempty_begin() { return group; }
685
845
  const_nonempty_iterator nonempty_begin() const { return group; }
686
- nonempty_iterator nonempty_end() { return group + num_buckets; }
687
- const_nonempty_iterator nonempty_end() const { return group + num_buckets; }
846
+ nonempty_iterator nonempty_end() {
847
+ return group + settings.num_buckets;
848
+ }
849
+ const_nonempty_iterator nonempty_end() const {
850
+ return group + settings.num_buckets;
851
+ }
688
852
  reverse_nonempty_iterator nonempty_rbegin() {
689
853
  return reverse_nonempty_iterator(nonempty_end());
690
854
  }
@@ -716,13 +880,12 @@ class sparsegroup {
716
880
  void bmclear(size_type i) { bitmap[charbit(i)] &= ~modbit(i); }
717
881
 
718
882
  pointer allocate_group(size_type n) {
719
- pointer retval = allocator.allocate(n);
883
+ pointer retval = settings.allocate(n);
720
884
  if (retval == NULL) {
721
885
  // We really should use PRIuS here, but I don't want to have to add
722
886
  // a whole new configure option, with concomitant macro namespace
723
887
  // pollution, just to print this (unlikely) error message. So I cast.
724
- fprintf(stderr, "sparsehash: FATAL ERROR: "
725
- "failed to allocate %lu groups\n",
888
+ fprintf(stderr, "sparsehash FATAL ERROR: failed to allocate %lu groups\n",
726
889
  static_cast<unsigned long>(n));
727
890
  exit(1);
728
891
  }
@@ -731,26 +894,17 @@ class sparsegroup {
731
894
 
732
895
  void free_group() {
733
896
  if (!group) return;
734
- pointer end_it = group + num_buckets;
897
+ pointer end_it = group + settings.num_buckets;
735
898
  for (pointer p = group; p != end_it; ++p)
736
899
  p->~value_type();
737
- allocator.deallocate(group, num_buckets);
900
+ settings.deallocate(group, settings.num_buckets);
738
901
  group = NULL;
739
902
  }
740
903
 
741
- public: // get_iter() in sparsetable needs it
742
- // We need a small function that tells us how many set bits there are
743
- // in positions 0..i-1 of the bitmap. It uses a big table.
744
- // We make it static so templates don't allocate lots of these tables.
745
- // There are lots of ways to do this calculation (called 'popcount').
746
- // The 8-bit table lookup is one of the fastest, though this
747
- // implementation suffers from not doing any loop unrolling. See, eg,
748
- // http://www.dalkescientific.com/writings/diary/archive/2008/07/03/hakmem_and_other_popcounts.html
749
- // http://gurmeetsingh.wordpress.com/2008/08/05/fast-bit-counting-routines/
750
- static size_type pos_to_offset(const unsigned char *bm, size_type pos) {
904
+ static size_type bits_in_char(unsigned char c) {
751
905
  // We could make these ints. The tradeoff is size (eg does it overwhelm
752
- // the cache?) vs efficiency in referencing sub-word-sized array elements
753
- static const char bits_in[256] = { // # of bits set in one char
906
+ // the cache?) vs efficiency in referencing sub-word-sized array elements.
907
+ static const char bits_in[256] = {
754
908
  0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
755
909
  1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
756
910
  1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
@@ -768,30 +922,77 @@ class sparsegroup {
768
922
  3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
769
923
  4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
770
924
  };
925
+ return bits_in[c];
926
+ }
927
+
928
+ public: // get_iter() in sparsetable needs it
929
+ // We need a small function that tells us how many set bits there are
930
+ // in positions 0..i-1 of the bitmap. It uses a big table.
931
+ // We make it static so templates don't allocate lots of these tables.
932
+ // There are lots of ways to do this calculation (called 'popcount').
933
+ // The 8-bit table lookup is one of the fastest, though this
934
+ // implementation suffers from not doing any loop unrolling. See, eg,
935
+ // http://www.dalkescientific.com/writings/diary/archive/2008/07/03/hakmem_and_other_popcounts.html
936
+ // http://gurmeetsingh.wordpress.com/2008/08/05/fast-bit-counting-routines/
937
+ static size_type pos_to_offset(const unsigned char *bm, size_type pos) {
771
938
  size_type retval = 0;
772
939
 
773
940
  // [Note: condition pos > 8 is an optimization; convince yourself we
774
941
  // give exactly the same result as if we had pos >= 8 here instead.]
775
- for ( ; pos > 8; pos -= 8 ) // bm[0..pos/8-1]
776
- retval += bits_in[*bm++]; // chars we want *all* bits in
777
- return retval + bits_in[*bm & ((1 << pos)-1)]; // the char that includes pos
942
+ for ( ; pos > 8; pos -= 8 ) // bm[0..pos/8-1]
943
+ retval += bits_in_char(*bm++); // chars we want *all* bits in
944
+ return retval + bits_in_char(*bm & ((1 << pos)-1)); // char including pos
778
945
  }
779
946
 
780
- size_type pos_to_offset(size_type pos) const { // not static but still const
947
+ size_type pos_to_offset(size_type pos) const { // not static but still const
781
948
  return pos_to_offset(bitmap, pos);
782
949
  }
783
950
 
951
+ // Returns the (logical) position in the bm[] array, i, such that
952
+ // bm[i] is the offset-th set bit in the array. It is the inverse
953
+ // of pos_to_offset. get_pos() uses this function to find the index
954
+ // of an nonempty_iterator in the table. Bit-twiddling from
955
+ // http://hackersdelight.org/basics.pdf
956
+ static size_type offset_to_pos(const unsigned char *bm, size_type offset) {
957
+ size_type retval = 0;
958
+ // This is sizeof(this->bitmap).
959
+ const size_type group_size = (GROUP_SIZE-1) / 8 + 1;
960
+ for (size_type i = 0; i < group_size; i++) { // forward scan
961
+ const size_type pop_count = bits_in_char(*bm);
962
+ if (pop_count > offset) {
963
+ unsigned char last_bm = *bm;
964
+ for (; offset > 0; offset--) {
965
+ last_bm &= (last_bm-1); // remove right-most set bit
966
+ }
967
+ // Clear all bits to the left of the rightmost bit (the &),
968
+ // and then clear the rightmost bit but set all bits to the
969
+ // right of it (the -1).
970
+ last_bm = (last_bm & -last_bm) - 1;
971
+ retval += bits_in_char(last_bm);
972
+ return retval;
973
+ }
974
+ offset -= pop_count;
975
+ retval += 8;
976
+ bm++;
977
+ }
978
+ return retval;
979
+ }
980
+
981
+ size_type offset_to_pos(size_type offset) const {
982
+ return offset_to_pos(bitmap, offset);
983
+ }
984
+
784
985
 
785
986
  public:
786
987
  // Constructors -- default and copy -- and destructor
787
- sparsegroup(allocator_type& a) : allocator(a), group(0), num_buckets(0) {
988
+ explicit sparsegroup(allocator_type& a) :
989
+ group(0), settings(alloc_impl<value_alloc_type>(a)) {
788
990
  memset(bitmap, 0, sizeof(bitmap));
789
991
  }
790
- sparsegroup(const sparsegroup& x)
791
- : allocator(x.allocator), group(0), num_buckets(x.num_buckets) {
792
- if ( num_buckets ) {
793
- group = allocate_group(x.num_buckets);
794
- uninitialized_copy(x.group, x.group + x.num_buckets, group);
992
+ sparsegroup(const sparsegroup& x) : group(0), settings(x.settings) {
993
+ if ( settings.num_buckets ) {
994
+ group = allocate_group(x.settings.num_buckets);
995
+ std::uninitialized_copy(x.group, x.group + x.settings.num_buckets, group);
795
996
  }
796
997
  memcpy(bitmap, x.bitmap, sizeof(bitmap));
797
998
  }
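The refactor above splits the old inline lookup table into bits_in_char() and adds offset_to_pos() as the inverse of pos_to_offset(). The core idea is plain popcount over the group bitmap: a logical index maps to its slot in the packed array by counting the set bits below it. A standalone illustration (using a compiler builtin in place of the library's 8-bit table, purely for brevity):

    #include <assert.h>

    static unsigned pos_to_offset(const unsigned char* bm, unsigned pos) {
      unsigned offset = 0;
      for (; pos > 8; pos -= 8)                           // whole leading bytes
        offset += __builtin_popcount(*bm++);
      return offset + __builtin_popcount(*bm & ((1u << pos) - 1));
    }

    int main() {
      // bitmap 0b00101101: logical positions 0, 2, 3 and 5 are assigned
      const unsigned char bm[1] = { 0x2D };
      assert(pos_to_offset(bm, 0) == 0);  // slot 0 is the 0th packed element
      assert(pos_to_offset(bm, 3) == 2);  // two assigned slots (0 and 2) lie below 3
      assert(pos_to_offset(bm, 5) == 3);  // slots 0, 2 and 3 lie below position 5
      return 0;
    }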
@@ -802,25 +1003,25 @@ class sparsegroup {
802
1003
  // copy constructor.
803
1004
  sparsegroup &operator=(const sparsegroup& x) {
804
1005
  if ( &x == this ) return *this; // x = x
805
- if ( x.num_buckets == 0 ) {
1006
+ if ( x.settings.num_buckets == 0 ) {
806
1007
  free_group();
807
1008
  } else {
808
- pointer p = allocate_group(x.num_buckets);
809
- uninitialized_copy(x.group, x.group + x.num_buckets, p);
1009
+ pointer p = allocate_group(x.settings.num_buckets);
1010
+ std::uninitialized_copy(x.group, x.group + x.settings.num_buckets, p);
810
1011
  free_group();
811
1012
  group = p;
812
1013
  }
813
1014
  memcpy(bitmap, x.bitmap, sizeof(bitmap));
814
- num_buckets = x.num_buckets;
1015
+ settings.num_buckets = x.settings.num_buckets;
815
1016
  return *this;
816
1017
  }
817
1018
 
818
1019
  // Many STL algorithms use swap instead of copy constructors
819
1020
  void swap(sparsegroup& x) {
820
- STL_NAMESPACE::swap(group, x.group);
1021
+ std::swap(group, x.group); // defined in <algorithm>
821
1022
  for ( int i = 0; i < sizeof(bitmap) / sizeof(*bitmap); ++i )
822
- STL_NAMESPACE::swap(bitmap[i], x.bitmap[i]); // swap not defined on arrays
823
- STL_NAMESPACE::swap(num_buckets, x.num_buckets);
1023
+ std::swap(bitmap[i], x.bitmap[i]); // swap not defined on arrays
1024
+ std::swap(settings.num_buckets, x.settings.num_buckets);
824
1025
  // we purposefully don't swap the allocator, which may not be swap-able
825
1026
  }
826
1027
 
@@ -828,7 +1029,7 @@ class sparsegroup {
828
1029
  void clear() {
829
1030
  free_group();
830
1031
  memset(bitmap, 0, sizeof(bitmap));
831
- num_buckets = 0;
1032
+ settings.num_buckets = 0;
832
1033
  }
833
1034
 
834
1035
  // Functions that tell you about size. Alas, these aren't so useful
@@ -837,7 +1038,7 @@ class sparsegroup {
837
1038
  size_type max_size() const { return GROUP_SIZE; }
838
1039
  bool empty() const { return false; }
839
1040
  // We also may want to know how many *used* buckets there are
840
- size_type num_nonempty() const { return num_buckets; }
1041
+ size_type num_nonempty() const { return settings.num_buckets; }
841
1042
 
842
1043
 
843
1044
  // get()/set() are explicitly const/non-const. You can use [] if
@@ -882,21 +1083,22 @@ class sparsegroup {
882
1083
  // But there's no way to capture that using type_traits, so we
883
1084
  // pretend that move(x, y) is equivalent to "x.~T(); new(x) T(y);"
884
1085
  // which is pretty much correct, if a bit conservative.)
885
- void set_aux(size_type offset, true_type) {
886
- group = allocator.realloc_or_die(group, num_buckets+1);
1086
+ void set_aux(size_type offset, base::true_type) {
1087
+ group = settings.realloc_or_die(group, settings.num_buckets+1);
887
1088
  // This is equivalent to memmove(), but faster on my Intel P4,
888
1089
  // at least with gcc4.1 -O2 / glibc 2.3.6.
889
- for (size_type i = num_buckets; i > offset; --i)
1090
+ for (size_type i = settings.num_buckets; i > offset; --i)
890
1091
  memcpy(group + i, group + i-1, sizeof(*group));
891
1092
  }
892
1093
 
893
1094
  // Create space at group[offset], without special assumptions about value_type
894
1095
  // and allocator_type.
895
- void set_aux(size_type offset, false_type) {
1096
+ void set_aux(size_type offset, base::false_type) {
896
1097
  // This is valid because 0 <= offset <= num_buckets
897
- pointer p = allocate_group(num_buckets + 1);
898
- uninitialized_copy(group, group + offset, p);
899
- uninitialized_copy(group + offset, group + num_buckets, p + offset + 1);
1098
+ pointer p = allocate_group(settings.num_buckets + 1);
1099
+ std::uninitialized_copy(group, group + offset, p);
1100
+ std::uninitialized_copy(group + offset, group + settings.num_buckets,
1101
+ p + offset + 1);
900
1102
  free_group();
901
1103
  group = p;
902
1104
  }
@@ -911,14 +1113,15 @@ class sparsegroup {
911
1113
  // Delete the old value, which we're replacing with the new one
912
1114
  group[offset].~value_type();
913
1115
  } else {
914
- typedef integral_constant<bool,
915
- (has_trivial_copy<value_type>::value &&
916
- has_trivial_destructor<value_type>::value &&
917
- is_same<allocator_type,
918
- libc_allocator_with_realloc<value_type> >::value)>
1116
+ typedef base::integral_constant<bool,
1117
+ (base::has_trivial_copy<value_type>::value &&
1118
+ base::has_trivial_destructor<value_type>::value &&
1119
+ base::is_same<
1120
+ allocator_type,
1121
+ libc_allocator_with_realloc<value_type> >::value)>
919
1122
  realloc_and_memmove_ok; // we pretend mv(x,y) == "x.~T(); new(x) T(y)"
920
1123
  set_aux(offset, realloc_and_memmove_ok());
921
- ++num_buckets;
1124
+ ++settings.num_buckets;
922
1125
  bmset(i);
923
1126
  }
924
1127
  // This does the actual inserting. Since we made the array using
@@ -943,25 +1146,26 @@ class sparsegroup {
  // there's no way to capture that using type_traits, so we pretend
  // that move(x, y) is equivalent to ""x.~T(); new(x) T(y);"
  // which is pretty much correct, if a bit conservative.)
- void erase_aux(size_type offset, true_type) {
+ void erase_aux(size_type offset, base::true_type) {
  // This isn't technically necessary, since we know we have a
  // trivial destructor, but is a cheap way to get a bit more safety.
  group[offset].~value_type();
  // This is equivalent to memmove(), but faster on my Intel P4,
  // at lesat with gcc4.1 -O2 / glibc 2.3.6.
- assert(num_buckets > 0);
- for (size_type i = offset; i < num_buckets-1; ++i)
+ assert(settings.num_buckets > 0);
+ for (size_type i = offset; i < settings.num_buckets-1; ++i)
  memcpy(group + i, group + i+1, sizeof(*group)); // hopefully inlined!
- group = allocator.realloc_or_die(group, num_buckets-1);
+ group = settings.realloc_or_die(group, settings.num_buckets-1);
  }

  // Shrink the array, without any special assumptions about value_type and
  // allocator_type.
- void erase_aux(size_type offset, false_type) {
+ void erase_aux(size_type offset, base::false_type) {
  // This is valid because 0 <= offset < num_buckets. Note the inequality.
- pointer p = allocate_group(num_buckets - 1);
- uninitialized_copy(group, group + offset, p);
- uninitialized_copy(group + offset + 1, group + num_buckets, p + offset);
+ pointer p = allocate_group(settings.num_buckets - 1);
+ std::uninitialized_copy(group, group + offset, p);
+ std::uninitialized_copy(group + offset + 1, group + settings.num_buckets,
+ p + offset);
  free_group();
  group = p;
  }
@@ -972,22 +1176,22 @@ class sparsegroup {
  // TODO(austern): Make this exception safe: handle exceptions from
  // value_type's copy constructor.
  void erase(size_type i) {
- if ( bmtest(i) ) { // trivial to erase empty bucket
+ if ( bmtest(i) ) { // trivial to erase empty bucket
  size_type offset = pos_to_offset(bitmap,i); // where we'll find (or insert)
- if ( num_buckets == 1 ) {
+ if ( settings.num_buckets == 1 ) {
  free_group();
  group = NULL;
  } else {
- typedef integral_constant<bool,
- (has_trivial_copy<value_type>::value &&
- has_trivial_destructor<value_type>::value &&
- is_same<
+ typedef base::integral_constant<bool,
+ (base::has_trivial_copy<value_type>::value &&
+ base::has_trivial_destructor<value_type>::value &&
+ base::is_same<
  allocator_type,
  libc_allocator_with_realloc<value_type> >::value)>
  realloc_and_memmove_ok; // pretend mv(x,y) == "x.~T(); new(x) T(y)"
  erase_aux(offset, realloc_and_memmove_ok());
  }
- --num_buckets;
+ --settings.num_buckets;
  bmclear(i);
  }
  }
@@ -1008,64 +1212,68 @@ class sparsegroup {
  // We support reading and writing groups to disk. We don't store
  // the actual array contents (which we don't know how to store),
  // just the bitmap and size. Meant to be used with table I/O.
- // Returns true if all was ok
- bool write_metadata(FILE *fp) const {
- assert(sizeof(num_buckets) == 2); // we explicitly set to u_int16_t
- PUT_(num_buckets, 8);
- PUT_(num_buckets, 0);
- if ( !fwrite(bitmap, sizeof(bitmap), 1, fp) ) return false;
+
+ template <typename OUTPUT> bool write_metadata(OUTPUT *fp) const {
+ // we explicitly set to u_int16_t
+ assert(sizeof(settings.num_buckets) == 2);
+ if ( !sparsehash_internal::write_bigendian_number(fp, settings.num_buckets,
+ 2) )
+ return false;
+ if ( !sparsehash_internal::write_data(fp, bitmap, sizeof(bitmap)) )
+ return false;
  return true;
  }

- // Reading destroys the old group contents! Returns true if all was ok
- bool read_metadata(FILE *fp) {
+ // Reading destroys the old group contents! Returns true if all was ok.
+ template <typename INPUT> bool read_metadata(INPUT *fp) {
  clear();
-
- int x; // the GET_ macro requires an 'int x' to be defined
- GET_(num_buckets, 8);
- GET_(num_buckets, 0);
-
- if ( !fread(bitmap, sizeof(bitmap), 1, fp) ) return false;
-
+ if ( !sparsehash_internal::read_bigendian_number(fp, &settings.num_buckets,
+ 2) )
+ return false;
+ if ( !sparsehash_internal::read_data(fp, bitmap, sizeof(bitmap)) )
+ return false;
  // We'll allocate the space, but we won't fill it: it will be
  // left as uninitialized raw memory.
- group = allocate_group(num_buckets);
+ group = allocate_group(settings.num_buckets);
  return true;
  }

+ // Again, only meaningful if value_type is a POD.
+ template <typename INPUT> bool read_nopointer_data(INPUT *fp) {
+ for ( nonempty_iterator it = nonempty_begin();
+ it != nonempty_end(); ++it ) {
+ if ( !sparsehash_internal::read_data(fp, &(*it), sizeof(*it)) )
+ return false;
+ }
+ return true;
+ }
+
  // If your keys and values are simple enough, we can write them
  // to disk for you. "simple enough" means POD and no pointers.
- // However, we don't try to normalize endianness
- bool write_nopointer_data(FILE *fp) const {
+ // However, we don't try to normalize endianness.
+ template <typename OUTPUT> bool write_nopointer_data(OUTPUT *fp) const {
  for ( const_nonempty_iterator it = nonempty_begin();
  it != nonempty_end(); ++it ) {
- if ( !fwrite(&*it, sizeof(*it), 1, fp) ) return false;
- }
- return true;
- }
-
- // When reading, we have to override the potential const-ness of *it.
- // Again, only meaningful if value_type is a POD.
- bool read_nopointer_data(FILE *fp) {
- for ( nonempty_iterator it = nonempty_begin();
- it != nonempty_end(); ++it ) {
- if ( !fread(reinterpret_cast<void*>(&(*it)), sizeof(*it), 1, fp) )
+ if ( !sparsehash_internal::write_data(fp, &(*it), sizeof(*it)) )
  return false;
  }
  return true;
  }

- // Comparisons. Note the comparisons are pretty arbitrary: we
- // compare values of the first index that isn't equal (using default
+
+ // Comparisons. We only need to define == and < -- we get
+ // != > <= >= via relops.h (which we happily included above).
+ // Note the comparisons are pretty arbitrary: we compare
+ // values of the first index that isn't equal (using default
  // value for empty buckets).
  bool operator==(const sparsegroup& x) const {
- return ( num_buckets == x.num_buckets &&
+ return ( settings.num_buckets == x.settings.num_buckets &&
  memcmp(bitmap, x.bitmap, sizeof(bitmap)) == 0 &&
- STL_NAMESPACE::equal(begin(), end(), x.begin()) ); // from algorithm
+ std::equal(begin(), end(), x.begin()) ); // from <algorithm>
  }
- bool operator<(const sparsegroup& x) const { // also from algorithm
- return STL_NAMESPACE::lexicographical_compare(begin(), end(),
- x.begin(), x.end());
+
+ bool operator<(const sparsegroup& x) const { // also from <algorithm>
+ return std::lexicographical_compare(begin(), end(), x.begin(), x.end());
  }
  bool operator!=(const sparsegroup& x) const { return !(*this == x); }
  bool operator<=(const sparsegroup& x) const { return !(x < *this); }
@@ -1084,9 +1292,9 @@ class sparsegroup {

  // realloc_or_die should only be used when using the default
  // allocator (libc_allocator_with_realloc).
- pointer realloc_or_die(pointer ptr, size_type n) {
+ pointer realloc_or_die(pointer /*ptr*/, size_type /*n*/) {
  fprintf(stderr, "realloc_or_die is only supported for "
- "libc_allocator_with_realloc");
+ "libc_allocator_with_realloc\n");
  exit(1);
  return NULL;
  }
@@ -1107,23 +1315,32 @@ class sparsegroup {
  pointer realloc_or_die(pointer ptr, size_type n) {
  pointer retval = this->reallocate(ptr, n);
  if (retval == NULL) {
- // We really should use PRIuS here, but I don't want to have to add
- // a whole new configure option, with concomitant macro namespace
- // pollution, just to print this (unlikely) error message. So I cast.
  fprintf(stderr, "sparsehash: FATAL ERROR: failed to reallocate "
- "%lu elements for ptr %p",
- static_cast<unsigned long>(n), ptr);
+ "%lu elements for ptr %p", static_cast<unsigned long>(n), ptr);
  exit(1);
  }
  return retval;
  }
  };

+ // Package allocator with num_buckets to eliminate memory needed for the
+ // zero-size allocator.
+ // If new fields are added to this class, we should add them to
+ // operator= and swap.
+ class Settings : public alloc_impl<value_alloc_type> {
+ public:
+ Settings(const alloc_impl<value_alloc_type>& a, u_int16_t n = 0)
+ : alloc_impl<value_alloc_type>(a), num_buckets(n) { }
+ Settings(const Settings& s)
+ : alloc_impl<value_alloc_type>(s), num_buckets(s.num_buckets) { }
+
+ u_int16_t num_buckets; // limits GROUP_SIZE to 64K
+ };
+
  // The actual data
- alloc_impl<value_alloc_type> allocator; // allocator for memory
- pointer group; // (small) array of T's
- unsigned char bitmap[(GROUP_SIZE-1)/8 + 1]; // fancy math is so we round up
- size_type num_buckets; // limits GROUP_SIZE to 64K
+ pointer group; // (small) array of T's
+ Settings settings; // allocator and num_buckets
+ unsigned char bitmap[(GROUP_SIZE-1)/8 + 1]; // fancy math is so we round up
  };

  // We need a global swap as well
@@ -1159,26 +1376,26 @@ class sparsetable {
  const_iterator;
  typedef table_element_adaptor<sparsetable<T, GROUP_SIZE, Alloc> >
  element_adaptor;
- typedef STL_NAMESPACE::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef STL_NAMESPACE::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator; // from iterator.h

  // These are our special iterators, that go over non-empty buckets in a
  // table. These aren't const only because you can change non-empty bcks.
- typedef two_d_iterator< vector< sparsegroup<value_type, GROUP_SIZE,
- value_alloc_type>,
+ typedef two_d_iterator< std::vector< sparsegroup<value_type, GROUP_SIZE,
+ value_alloc_type>,
  vector_alloc> >
  nonempty_iterator;
- typedef const_two_d_iterator< vector< sparsegroup<value_type,
- GROUP_SIZE,
- value_alloc_type>,
+ typedef const_two_d_iterator< std::vector< sparsegroup<value_type,
+ GROUP_SIZE,
+ value_alloc_type>,
  vector_alloc> >
  const_nonempty_iterator;
- typedef STL_NAMESPACE::reverse_iterator<nonempty_iterator> reverse_nonempty_iterator;
- typedef STL_NAMESPACE::reverse_iterator<const_nonempty_iterator> const_reverse_nonempty_iterator;
+ typedef std::reverse_iterator<nonempty_iterator> reverse_nonempty_iterator;
+ typedef std::reverse_iterator<const_nonempty_iterator> const_reverse_nonempty_iterator;
  // Another special iterator: it frees memory as it iterates (used to resize)
- typedef destructive_two_d_iterator< vector< sparsegroup<value_type,
- GROUP_SIZE,
- value_alloc_type>,
+ typedef destructive_two_d_iterator< std::vector< sparsegroup<value_type,
+ GROUP_SIZE,
+ value_alloc_type>,
  vector_alloc> >
  destructive_iterator;

@@ -1225,7 +1442,7 @@ class sparsetable {
  }

  typedef sparsegroup<value_type, GROUP_SIZE, allocator_type> group_type;
- typedef vector<group_type, vector_alloc > group_vector_type;
+ typedef std::vector<group_type, vector_alloc > group_vector_type;

  typedef typename group_vector_type::reference GroupsReference;
  typedef typename group_vector_type::const_reference GroupsConstReference;
@@ -1252,19 +1469,18 @@ class sparsetable {

  public:
  // Constructors -- default, normal (when you specify size), and copy
- sparsetable(size_type sz = 0, Alloc alloc = Alloc())
- : groups(vector_alloc(alloc)),
- table_size(sz), num_buckets(0), allocator(alloc) {
- groups.resize(num_groups(sz), group_type(allocator));
+ explicit sparsetable(size_type sz = 0, Alloc alloc = Alloc())
+ : groups(vector_alloc(alloc)), settings(alloc, sz) {
+ groups.resize(num_groups(sz), group_type(settings));
  }
  // We can get away with using the default copy constructor,
  // and default destructor, and hence the default operator=. Huzzah!

  // Many STL algorithms use swap instead of copy constructors
  void swap(sparsetable& x) {
- STL_NAMESPACE::swap(groups, x.groups);
- STL_NAMESPACE::swap(table_size, x.table_size);
- STL_NAMESPACE::swap(num_buckets, x.num_buckets);
+ std::swap(groups, x.groups); // defined in stl_algobase.h
+ std::swap(settings.table_size, x.settings.table_size);
+ std::swap(settings.num_buckets, x.settings.num_buckets);
  }

  // It's always nice to be able to clear a table without deallocating it
@@ -1273,41 +1489,45 @@ class sparsetable {
  for ( group = groups.begin(); group != groups.end(); ++group ) {
  group->clear();
  }
- num_buckets = 0;
+ settings.num_buckets = 0;
  }

  // ACCESSOR FUNCTIONS for the things we templatize on, basically
- allocator_type get_allocator() const { return allocator; }
+ allocator_type get_allocator() const {
+ return allocator_type(settings);
+ }


  // Functions that tell you about size.
  // NOTE: empty() is non-intuitive! It does not tell you the number
  // of not-empty buckets (use num_nonempty() for that). Instead
  // it says whether you've allocated any buckets or not.
- size_type size() const { return table_size; }
- size_type max_size() const { return allocator.max_size(); }
- bool empty() const { return table_size == 0; }
+ size_type size() const { return settings.table_size; }
+ size_type max_size() const { return settings.max_size(); }
+ bool empty() const { return settings.table_size == 0; }
  // We also may want to know how many *used* buckets there are
- size_type num_nonempty() const { return num_buckets; }
+ size_type num_nonempty() const { return settings.num_buckets; }

  // OK, we'll let you resize one of these puppies
  void resize(size_type new_size) {
- groups.resize(num_groups(new_size), group_type(allocator));
- if ( new_size < table_size) { // lower num_buckets, clear last group
+ groups.resize(num_groups(new_size), group_type(settings));
+ if ( new_size < settings.table_size) {
+ // lower num_buckets, clear last group
  if ( pos_in_group(new_size) > 0 ) // need to clear inside last group
  groups.back().erase(groups.back().begin() + pos_in_group(new_size),
  groups.back().end());
- num_buckets = 0; // refigure # of used buckets
+ settings.num_buckets = 0; // refigure # of used buckets
  GroupsConstIterator group;
  for ( group = groups.begin(); group != groups.end(); ++group )
- num_buckets += group->num_nonempty();
+ settings.num_buckets += group->num_nonempty();
  }
- table_size = new_size;
+ settings.table_size = new_size;
  }


  // We let you see if a bucket is non-empty without retrieving it
  bool test(size_type i) const {
+ assert(i < settings.table_size);
  return which_group(i).test(pos_in_group(i));
  }
  bool test(iterator pos) const {
@@ -1320,7 +1540,7 @@ class sparsetable {
  // We only return const_references because it's really hard to
  // return something settable for empty buckets. Use set() instead.
  const_reference get(size_type i) const {
- assert(i < table_size);
+ assert(i < settings.table_size);
  return which_group(i).get(pos_in_group(i));
  }

@@ -1328,17 +1548,17 @@ class sparsetable {
  // This is used by sparse_hashtable to get an element from the table
  // when we know it exists (because the caller has called test(i)).
  const_reference unsafe_get(size_type i) const {
- assert(i < table_size);
+ assert(i < settings.table_size);
  assert(test(i));
  return which_group(i).unsafe_get(pos_in_group(i));
  }

  // TODO(csilvers): make protected + friend element_adaptor
  reference mutating_get(size_type i) { // fills bucket i before getting
- assert(i < table_size);
- size_type old_numbuckets = which_group(i).num_nonempty();
+ assert(i < settings.table_size);
+ typename group_type::size_type old_numbuckets = which_group(i).num_nonempty();
  reference retval = which_group(i).mutating_get(pos_in_group(i));
- num_buckets += which_group(i).num_nonempty() - old_numbuckets;
+ settings.num_buckets += which_group(i).num_nonempty() - old_numbuckets;
  return retval;
  }

@@ -1370,24 +1590,33 @@ class sparsetable {
  groups[group_num(i)].pos_to_offset(pos_in_group(i))));
  }

+ // And the reverse transformation.
+ size_type get_pos(const const_nonempty_iterator it) const {
+ difference_type current_row = it.row_current - it.row_begin;
+ difference_type current_col = (it.col_current -
+ groups[current_row].nonempty_begin());
+ return ((current_row * GROUP_SIZE) +
+ groups[current_row].offset_to_pos(current_col));
+ }
+

  // This returns a reference to the inserted item (which is a copy of val)
  // The trick is to figure out whether we're replacing or inserting anew
  reference set(size_type i, const_reference val) {
- assert(i < table_size);
- size_type old_numbuckets = which_group(i).num_nonempty();
+ assert(i < settings.table_size);
+ typename group_type::size_type old_numbuckets = which_group(i).num_nonempty();
  reference retval = which_group(i).set(pos_in_group(i), val);
- num_buckets += which_group(i).num_nonempty() - old_numbuckets;
+ settings.num_buckets += which_group(i).num_nonempty() - old_numbuckets;
  return retval;
  }

  // This takes the specified elements out of the table. This is
  // "undefining", rather than "clearing".
  void erase(size_type i) {
- assert(i < table_size);
- size_type old_numbuckets = which_group(i).num_nonempty();
+ assert(i < settings.table_size);
+ typename group_type::size_type old_numbuckets = which_group(i).num_nonempty();
  which_group(i).erase(pos_in_group(i));
- num_buckets += which_group(i).num_nonempty() - old_numbuckets;
+ settings.num_buckets += which_group(i).num_nonempty() - old_numbuckets;
  }

  void erase(iterator pos) {
@@ -1408,7 +1637,8 @@ class sparsetable {

  private:
  // Every time the disk format changes, this should probably change too
- static const unsigned long MAGIC_NUMBER = 0x24687531;
+ typedef unsigned long MagicNumberType;
+ static const MagicNumberType MAGIC_NUMBER = 0x24687531;

  // Old versions of this code write all data in 32 bits. We need to
  // support these files as well as having support for 64-bit systems.
@@ -1418,57 +1648,42 @@ class sparsetable {
  // causes us to mis-read old-version code that stores exactly
  // 0xFFFFFFF, but I don't think that is likely to have happened for
  // these particular values.
- static bool write_32_or_64(FILE* fp, size_type value) {
+ template <typename OUTPUT, typename IntType>
+ static bool write_32_or_64(OUTPUT* fp, IntType value) {
  if ( value < 0xFFFFFFFFULL ) { // fits in 4 bytes
- PUT_(value, 24);
- PUT_(value, 16);
- PUT_(value, 8);
- PUT_(value, 0);
- } else if ( value == 0xFFFFFFFFUL ) { // special case in 32bit systems
- PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0); // marker
- PUT_(0, 0); PUT_(0, 0); PUT_(0, 0); PUT_(0, 0);
- PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0);
+ if ( !sparsehash_internal::write_bigendian_number(fp, value, 4) )
+ return false;
  } else {
- PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0); PUT_(0xFF, 0); // marker
- PUT_(value, 56);
- PUT_(value, 48);
- PUT_(value, 40);
- PUT_(value, 32);
- PUT_(value, 24);
- PUT_(value, 16);
- PUT_(value, 8);
- PUT_(value, 0);
+ if ( !sparsehash_internal::write_bigendian_number(fp, 0xFFFFFFFFUL, 4) )
+ return false;
+ if ( !sparsehash_internal::write_bigendian_number(fp, value, 8) )
+ return false;
  }
  return true;
  }

- static bool read_32_or_64(FILE* fp, size_type *value) { // reads into value
- size_type first4 = 0;
- int x;
- GET_(first4, 24);
- GET_(first4, 16);
- GET_(first4, 8);
- GET_(first4, 0);
+ template <typename INPUT, typename IntType>
+ static bool read_32_or_64(INPUT* fp, IntType *value) { // reads into value
+ MagicNumberType first4 = 0; // a convenient 32-bit unsigned type
+ if ( !sparsehash_internal::read_bigendian_number(fp, &first4, 4) )
+ return false;
  if ( first4 < 0xFFFFFFFFULL ) {
  *value = first4;
  } else {
- GET_(*value, 56);
- GET_(*value, 48);
- GET_(*value, 40);
- GET_(*value, 32);
- GET_(*value, 24);
- GET_(*value, 16);
- GET_(*value, 8);
- GET_(*value, 0);
+ if ( !sparsehash_internal::read_bigendian_number(fp, value, 8) )
+ return false;
  }
  return true;
  }

  public:
- bool write_metadata(FILE *fp) const {
+ // read/write_metadata() and read_write/nopointer_data() are DEPRECATED.
+ // Use serialize() and unserialize(), below, for new code.
+
+ template <typename OUTPUT> bool write_metadata(OUTPUT *fp) const {
  if ( !write_32_or_64(fp, MAGIC_NUMBER) ) return false;
- if ( !write_32_or_64(fp, table_size) ) return false;
- if ( !write_32_or_64(fp, num_buckets) ) return false;
+ if ( !write_32_or_64(fp, settings.table_size) ) return false;
+ if ( !write_32_or_64(fp, settings.num_buckets) ) return false;

  GroupsConstIterator group;
  for ( group = groups.begin(); group != groups.end(); ++group )
@@ -1477,7 +1692,7 @@ class sparsetable {
  }

  // Reading destroys the old table contents! Returns true if read ok.
- bool read_metadata(FILE *fp) {
+ template <typename INPUT> bool read_metadata(INPUT *fp) {
  size_type magic_read = 0;
  if ( !read_32_or_64(fp, &magic_read) ) return false;
  if ( magic_read != MAGIC_NUMBER ) {
@@ -1485,10 +1700,10 @@ class sparsetable {
  return false;
  }

- if ( !read_32_or_64(fp, &table_size) ) return false;
- if ( !read_32_or_64(fp, &num_buckets) ) return false;
+ if ( !read_32_or_64(fp, &settings.table_size) ) return false;
+ if ( !read_32_or_64(fp, &settings.num_buckets) ) return false;

- resize(table_size); // so the vector's sized ok
+ resize(settings.table_size); // so the vector's sized ok
  GroupsIterator group;
  for ( group = groups.begin(); group != groups.end(); ++group )
  if ( group->read_metadata(fp) == false ) return false;
@@ -1517,17 +1732,50 @@ class sparsetable {
  return true;
  }

+ // INPUT and OUTPUT must be either a FILE, *or* a C++ stream
+ // (istream, ostream, etc) *or* a class providing
+ // Read(void*, size_t) and Write(const void*, size_t)
+ // (respectively), which writes a buffer into a stream
+ // (which the INPUT/OUTPUT instance presumably owns).
+
+ typedef sparsehash_internal::pod_serializer<value_type> NopointerSerializer;
+
+ // ValueSerializer: a functor. operator()(OUTPUT*, const value_type&)
+ template <typename ValueSerializer, typename OUTPUT>
+ bool serialize(ValueSerializer serializer, OUTPUT *fp) {
+ if ( !write_metadata(fp) )
+ return false;
+ for ( const_nonempty_iterator it = nonempty_begin();
+ it != nonempty_end(); ++it ) {
+ if ( !serializer(fp, *it) ) return false;
+ }
+ return true;
+ }
+
+ // ValueSerializer: a functor. operator()(INPUT*, value_type*)
+ template <typename ValueSerializer, typename INPUT>
+ bool unserialize(ValueSerializer serializer, INPUT *fp) {
+ clear();
+ if ( !read_metadata(fp) )
+ return false;
+ for ( nonempty_iterator it = nonempty_begin();
+ it != nonempty_end(); ++it ) {
+ if ( !serializer(fp, &*it) ) return false;
+ }
+ return true;
+ }
+
  // Comparisons. Note the comparisons are pretty arbitrary: we
  // compare values of the first index that isn't equal (using default
  // value for empty buckets).
  bool operator==(const sparsetable& x) const {
- return ( table_size == x.table_size &&
- num_buckets == x.num_buckets &&
+ return ( settings.table_size == x.settings.table_size &&
+ settings.num_buckets == x.settings.num_buckets &&
  groups == x.groups );
  }
- bool operator<(const sparsetable& x) const { // also from algobase.h
- return STL_NAMESPACE::lexicographical_compare(begin(), end(),
- x.begin(), x.end());
+
+ bool operator<(const sparsetable& x) const {
+ return std::lexicographical_compare(begin(), end(), x.begin(), x.end());
  }
  bool operator!=(const sparsetable& x) const { return !(*this == x); }
  bool operator<=(const sparsetable& x) const { return !(x < *this); }
@@ -1536,11 +1784,28 @@ class sparsetable {


  private:
+ // Package allocator with table_size and num_buckets to eliminate memory
+ // needed for the zero-size allocator.
+ // If new fields are added to this class, we should add them to
+ // operator= and swap.
+ class Settings : public allocator_type {
+ public:
+ typedef typename allocator_type::size_type size_type;
+
+ Settings(const allocator_type& a, size_type sz = 0, size_type n = 0)
+ : allocator_type(a), table_size(sz), num_buckets(n) { }
+
+ Settings(const Settings& s)
+ : allocator_type(s),
+ table_size(s.table_size), num_buckets(s.num_buckets) { }
+
+ size_type table_size; // how many buckets they want
+ size_type num_buckets; // number of non-empty buckets
+ };
+
  // The actual data
- group_vector_type groups; // our list of groups
- size_type table_size; // how many buckets they want
- size_type num_buckets; // number of non-empty buckets
- allocator_type allocator; // just passed in to sparsegroup
+ group_vector_type groups; // our list of groups
+ Settings settings; // allocator, table size, buckets
  };

  // We need a global swap as well
@@ -1550,9 +1815,6 @@ inline void swap(sparsetable<T,GROUP_SIZE,Alloc> &x,
  x.swap(y);
  }

- #undef GET_
- #undef PUT_
-
  _END_GOOGLE_NAMESPACE_

- #endif
+ #endif // UTIL_GTL_SPARSETABLE_H_
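
Usage note (not part of the diff above): the serialize()/unserialize() methods and the NopointerSerializer typedef that the bundled sparsehash-2.0.2 adds to sparsetable, shown in the hunk above, accept a plain FILE*, a C++ stream, or any class exposing Read()/Write(). The following is a minimal sketch only, assuming value_type is a POD (here int), the header is reachable as <sparsehash/sparsetable>, and the library uses its default google namespace; adjust the include path to this gem's ext/ layout if building against the bundled copy.

#include <cstdio>
#include <sparsehash/sparsetable>   // assumed install path; not part of the diff

int main() {
  google::sparsetable<int> t(100);   // 100 buckets, all initially "empty"
  t.set(5, 42);                      // fill two buckets
  t.set(99, 7);

  // serialize() writes the metadata (magic number, table size, bucket count,
  // per-group bitmaps) and then each non-empty value via the serializer functor.
  FILE* fp = std::fopen("table.dat", "wb");
  if (fp == NULL || !t.serialize(google::sparsetable<int>::NopointerSerializer(), fp))
    return 1;
  std::fclose(fp);

  // unserialize() clears the table and rebuilds it from the stream.
  google::sparsetable<int> t2;
  fp = std::fopen("table.dat", "rb");
  if (fp == NULL || !t2.unserialize(google::sparsetable<int>::NopointerSerializer(), fp))
    return 1;
  std::fclose(fp);
  return 0;                          // t2 now holds the same non-empty buckets as t
}

Error handling is minimal and the non-POD case, which needs a custom ValueSerializer functor, is omitted to keep the sketch short.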