google_hash 0.6.2 → 0.7.0
- data/README +61 -27
- data/Rakefile +4 -1
- data/TODO +5 -0
- data/VERSION +1 -1
- data/changelog +3 -0
- data/ext/extconf.rb +10 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/AUTHORS +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/COPYING +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/ChangeLog +47 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/INSTALL +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/Makefile.am +29 -14
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/Makefile.in +77 -42
- data/ext/sparsehash-1.8.1/NEWS +71 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/README +0 -0
- data/ext/{sparsehash-1.5.2/README.windows → sparsehash-1.8.1/README_windows.txt} +25 -25
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/TODO +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/aclocal.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/compile +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/config.guess +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/config.sub +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/configure +3690 -4560
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/configure.ac +1 -1
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/depcomp +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/dense_hash_map.html +65 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/dense_hash_set.html +65 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/designstyle.css +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/implementation.html +11 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/index.html +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/performance.html +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/sparse_hash_map.html +65 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/sparse_hash_set.html +65 -5
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/doc/sparsetable.html +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/experimental/Makefile +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/experimental/README +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/experimental/example.c +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/experimental/libchash.c +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/experimental/libchash.h +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/google-sparsehash.sln +17 -1
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/install-sh +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/acx_pthread.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/google_namespace.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/namespaces.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/stl_hash.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/stl_hash_fun.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/m4/stl_namespace.m4 +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/missing +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/mkinstalldirs +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb.sh +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/README +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/changelog +24 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/compat +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/control +1 -1
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/copyright +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/docs +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/rules +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/sparsehash.dirs +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/deb/sparsehash.install +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/rpm.sh +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/packages/rpm/rpm.spec +1 -1
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/config.h.in +3 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/config.h.include +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/dense_hash_map +43 -27
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/dense_hash_set +40 -19
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/sparse_hash_map +32 -23
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/sparse_hash_set +31 -21
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/sparsehash/densehashtable.h +481 -298
- data/ext/sparsehash-1.8.1/src/google/sparsehash/hashtable-common.h +178 -0
- data/ext/sparsehash-1.8.1/src/google/sparsehash/libc_allocator_with_realloc.h +121 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/sparsehash/sparsehashtable.h +404 -233
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/sparsetable +173 -83
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/google/type_traits.h +3 -29
- data/ext/sparsehash-1.8.1/src/hash_test_interface.h +1011 -0
- data/ext/sparsehash-1.8.1/src/hashtable_test.cc +1733 -0
- data/ext/sparsehash-1.8.1/src/libc_allocator_with_realloc_test.cc +129 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/simple_test.cc +1 -1
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/sparsetable_unittest.cc +202 -6
- data/ext/sparsehash-1.8.1/src/testutil.h +251 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/time_hash_map.cc +128 -54
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/type_traits_unittest.cc +30 -20
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/windows/config.h +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/windows/google/sparsehash/sparseconfig.h +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/windows/port.cc +0 -0
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/src/windows/port.h +0 -0
- data/ext/sparsehash-1.8.1/vsprojects/hashtable_test/hashtable_test.vcproj +197 -0
- data/ext/{sparsehash-1.5.2/vsprojects/hashtable_unittest/hashtable_unittest.vcproj → sparsehash-1.8.1/vsprojects/simple_test/simple_test.vcproj} +9 -8
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/vsprojects/sparsetable_unittest/sparsetable_unittest.vcproj +0 -2
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/vsprojects/time_hash_map/time_hash_map.vcproj +3 -2
- data/ext/{sparsehash-1.5.2 → sparsehash-1.8.1}/vsprojects/type_traits_unittest/type_traits_unittest.vcproj +0 -2
- data/ext/template/google_hash.cpp.erb +2 -1
- data/ext/template/main.cpp.erb +1 -1
- data/results.txt +6 -22
- data/spec/benchmark.rb +57 -0
- data/spec/spec.google_hash.rb +1 -8
- metadata +140 -130
- data/ext/benchmark.rb +0 -47
- data/ext/sparsehash-1.5.2/NEWS +0 -0
- data/ext/sparsehash-1.5.2/src/hashtable_unittest.cc +0 -1375
- data/ext/sparsehash-1.5.2/src/words +0 -8944
- data/types.txt +0 -18
data/ext/benchmark.rb
DELETED
@@ -1,47 +0,0 @@
-require './google_hash'
-require 'benchmark'
-require 'hitimes'
-
-def measure
-  Hitimes::Interval.measure { yield }
-end
-
-def meas string
-  puts "% -23s" % string + measure { yield }.to_s
-end
-
-def go num
-  puts num
-  # get all existing
-  all = [Hash] + Object.constants.grep(/Goog/).reject{|n| n == :GoogleHash}.map{|n| eval n}
-
-  for name in all do
-    GC.start
-    subject = name.new
-    puts
-    puts name
-
-    subject = name.new
-    meas( "populate string ") { num.times {|n| subject['abc'] = 4 } } rescue nil
-    subject = name.new
-    meas( "populate symbol") { num.times {|n| subject[:abc] = 4} } rescue nil
-
-    meas( "populate int") { num.times {|n| subject[n] = 4}}
-    meas("each") { subject.each{|k, v| } }
-
-    begin
-      subject = name.new
-      subject[3] = 4
-      meas("lookup int") { num.times {|n| subject[3]}}
-      subject['abc'] = 3
-      subject[:abc] = 3
-
-      meas("lookup string") { num.times {|n| subject['abc']}}
-      meas( "lookup symbol" ) { num.times {|n| subject[:abc]}}
-    rescue
-    end
-  end
-end
-
-num = 200_000
-go num if $0 == __FILE__
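The benchmark is not gone: per the file list above it reappears as data/spec/benchmark.rb (+57 lines), whose exact contents are not shown in this diff. The snippet below is only a sketch of the same measurement approach, assuming a built google_hash extension (which registers its classes as Goog*-prefixed constants, as the deleted script's grep relies on) and the hitimes gem it already used.

```ruby
# Sketch only: mirrors the deleted script's approach. The require path and the
# timing/reporting details are assumptions, not the contents of data/spec/benchmark.rb.
require 'google_hash'   # the original script loaded the local build: require './google_hash'
require 'hitimes'

num = 200_000

# Collect Hash plus every Goog*-prefixed class the extension defines,
# the same discovery trick the original benchmark used.
klasses = [Hash] + Object.constants.grep(/Goog/).reject { |c| c == :GoogleHash }.map { |c| Object.const_get(c) }

klasses.each do |klass|
  subject = klass.new
  # Hitimes::Interval.measure returns the elapsed time in seconds as a Float.
  secs = Hitimes::Interval.measure { num.times { |i| subject[i] = 4 } }
  puts format('%-23s %.4f s to insert %d int keys', klass, secs, num)
end
```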
data/ext/sparsehash-1.5.2/NEWS
DELETED
File without changes
data/ext/sparsehash-1.5.2/src/hashtable_unittest.cc
DELETED
@@ -1,1375 +0,0 @@
All 1,375 lines of the upstream sparsehash 1.5.2 unit test were removed along with the rest of the old bundled sources. The file exercised sparse/dense hashtable, hash_set, and hash_map behaviour: insert/erase/equal_range, serialization via write_metadata/read_metadata, the memmove optimization for trivially copyable types, shrinking, resizing parameters (the old set_resizing_parameters API and the tr1-style min/max_load_factor API), bucket-level iteration, custom hashers and key_equal functors, operator==, and memory management. Its coverage moves to the new src/hashtable_test.cc (with hash_test_interface.h and testutil.h) shipped in sparsehash 1.8.1.