hashes 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,348 @@
1
+ /*-----------------------------------------------------------------------------
2
+ MurmurHash3 was written by Austin Appleby, and is placed in the public
3
+ domain. The author hereby disclaims copyright to this source code.
4
+
5
+ Note - The x86 and x64 versions do _not_ produce the same results, as the
6
+ algorithms are optimized for their respective platforms. You can still
7
+ compile and run any of them on any platform, but your performance with the
8
+ non-native version will be less than optimal.
9
+ */
10
+
11
+ /*#include "MurmurHash3.h"*/
12
+
13
#include <stddef.h>
#include <stdint.h>
#include <string.h> /* memcpy, used for safe unaligned block reads */
15
+
16
+ /*-----------------------------------------------------------------------------
17
+ Platform-specific functions and macros
18
+ */
19
+
20
+ /* Microsoft Visual Studio */
21
+
22
#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

/* Other compilers */

#else /* defined(_MSC_VER) */

/* FIX: the bare attribute is not sufficient on GCC/Clang - without the
   `inline` keyword the compiler emits "always_inline function might not
   be inlinable" and may decline to inline. The `inline` keyword must
   accompany the attribute (same correction was applied upstream). */
#define FORCE_INLINE inline __attribute__((always_inline))

/* Rotate left by r bits. Callers in this file only pass constants in
   1..31 (resp. 1..63), so the complementary shift never reaches the
   type width and is well defined. */
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

/* Suffix 64-bit literals so they stay 64-bit on 32-bit targets. */
#define BIG_CONSTANT(x) (x##LLU)

#endif /* !defined(_MSC_VER) */
55
+
56
+ /*-----------------------------------------------------------------------------
57
+ Block read - if your platform needs to do endian-swapping or can only
58
+ handle aligned reads, do the conversion here
59
+ */
60
+
61
+ static FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
62
+ {
63
+ return p[i];
64
+ }
65
+
66
+ static FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
67
+ {
68
+ return p[i];
69
+ }
70
+
71
+ /*-----------------------------------------------------------------------------
72
+ Finalization mix - force all bits of a hash block to avalanche
73
+ */
74
+
75
+ static FORCE_INLINE uint32_t fmix32 ( uint32_t h )
76
+ {
77
+ h ^= h >> 16;
78
+ h *= 0x85ebca6b;
79
+ h ^= h >> 13;
80
+ h *= 0xc2b2ae35;
81
+ h ^= h >> 16;
82
+
83
+ return h;
84
+ }
85
+
86
+ /*----------*/
87
+
88
+ static FORCE_INLINE uint64_t fmix64 ( uint64_t k )
89
+ {
90
+ k ^= k >> 33;
91
+ k *= BIG_CONSTANT(0xff51afd7ed558ccd);
92
+ k ^= k >> 33;
93
+ k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
94
+ k ^= k >> 33;
95
+
96
+ return k;
97
+ }
98
+
99
+ /*-----------------------------------------------------------------------------*/
100
+
101
/* MurmurHash3, x86 32-bit variant.

   key  - bytes to hash (any alignment; reads go through getblock32)
   len  - length of key in bytes (non-negative)
   seed - initial state; different seeds yield independent hash families
   out  - receives the 4-byte result; must point to writable uint32_t
          storage (NOTE(review): assumed suitably aligned by the caller)

   Non-cryptographic (per package description): not suitable where an
   adversary controls the input. */
void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;  /* number of complete 4-byte blocks */

  uint32_t h1 = seed;

  /* mixing constants from the reference design */
  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  /*----------
  body
  */

  /* blocks points one-past the last full block; the loop below indexes
     it with negative i, walking forward through the data. */
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock32(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  /*----------
  tail
  */

  /* 0-3 leftover bytes, accumulated little-endian-style into k1.
     The case fallthroughs are intentional. */
  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16; /* fall through */
  case 2: k1 ^= tail[1] << 8;  /* fall through */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  /*----------
  finalization
  */

  /* fold in the length so inputs of different lengths that share a
     prefix of blocks still diverge, then avalanche */
  h1 ^= len;

  h1 = fmix32(h1);

  *(uint32_t*)out = h1;
}
157
+
158
+ /*-----------------------------------------------------------------------------*/
159
+
160
/* MurmurHash3, 128-bit output computed with four 32-bit lanes
   (the x86-oriented variant; output differs from the x64 variant by
   design - see the file header note).

   key  - bytes to hash (any alignment; reads go through getblock32)
   len  - length of key in bytes (non-negative)
   seed - initial state, replicated into all four lanes
   out  - receives 16 bytes; must point to writable uint32_t[4]
          storage (NOTE(review): assumed suitably aligned by caller) */
void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;  /* number of complete 16-byte blocks */

  /* four parallel 32-bit lanes */
  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  /* per-lane mixing constants from the reference design */
  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  /*----------
  body
  */

  /* blocks points one-past the last full 16-byte block; negative
     indexing below walks forward through the data. */
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock32(blocks,i*4+0);
    uint32_t k2 = getblock32(blocks,i*4+1);
    uint32_t k3 = getblock32(blocks,i*4+2);
    uint32_t k4 = getblock32(blocks,i*4+3);

    /* each lane mixes its own word, then borrows from the next lane
       so the four lanes do not evolve independently */
    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  /*----------
  tail
  */

  /* 0-15 leftover bytes: bytes 12-14 feed k4, 8-11 feed k3, 4-7 feed
     k2, 0-3 feed k1. All case fallthroughs are intentional. */
  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16; /* fall through */
  case 14: k4 ^= tail[13] << 8;  /* fall through */
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
           /* fall through */

  case 12: k3 ^= tail[11] << 24; /* fall through */
  case 11: k3 ^= tail[10] << 16; /* fall through */
  case 10: k3 ^= tail[ 9] << 8;  /* fall through */
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
           /* fall through */

  case  8: k2 ^= tail[ 7] << 24; /* fall through */
  case  7: k2 ^= tail[ 6] << 16; /* fall through */
  case  6: k2 ^= tail[ 5] << 8;  /* fall through */
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
           /* fall through */

  case  4: k1 ^= tail[ 3] << 24; /* fall through */
  case  3: k1 ^= tail[ 2] << 16; /* fall through */
  case  2: k1 ^= tail[ 1] << 8;  /* fall through */
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  /*----------
  finalization
  */

  /* fold in the length, cross-mix the lanes, avalanche each lane,
     then cross-mix once more */
  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
265
+
266
+ /*-----------------------------------------------------------------------------*/
267
+
268
/* MurmurHash3, 128-bit output computed with two 64-bit lanes
   (the x64-oriented variant; output differs from the x86 128-bit
   variant by design - see the file header note).

   key  - bytes to hash (any alignment; reads go through getblock64)
   len  - length of key in bytes (non-negative)
   seed - initial state, replicated into both lanes
   out  - receives 16 bytes; must point to writable uint64_t[2]
          storage (NOTE(review): assumed suitably aligned by caller) */
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;  /* number of complete 16-byte blocks */

  /* two parallel 64-bit lanes */
  uint64_t h1 = seed;
  uint64_t h2 = seed;

  /* mixing constants from the reference design */
  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  /*----------
  body
  */

  /* unlike the x86 variants, this one iterates forward from the start */
  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock64(blocks,i*2+0);
    uint64_t k2 = getblock64(blocks,i*2+1);

    /* each lane mixes its own word, then borrows from the other lane */
    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  /*----------
  tail
  */

  /* 0-15 leftover bytes: bytes 8-14 feed k2, bytes 0-7 feed k1,
     little-endian-style via explicit shifts. The uint64_t casts keep
     the shifts out of (narrower) int arithmetic. All case
     fallthroughs are intentional. */
  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48; /* fall through */
  case 14: k2 ^= (uint64_t)(tail[13]) << 40; /* fall through */
  case 13: k2 ^= (uint64_t)(tail[12]) << 32; /* fall through */
  case 12: k2 ^= (uint64_t)(tail[11]) << 24; /* fall through */
  case 11: k2 ^= (uint64_t)(tail[10]) << 16; /* fall through */
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;  /* fall through */
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
           /* fall through */

  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56; /* fall through */
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48; /* fall through */
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40; /* fall through */
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32; /* fall through */
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24; /* fall through */
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16; /* fall through */
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;  /* fall through */
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  /*----------
  finalization
  */

  /* fold in the length, cross-mix the lanes, avalanche each lane,
     then cross-mix once more */
  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
data/lib/hashes.rb ADDED
@@ -0,0 +1 @@
1
+ require 'hashes/hashes'
metadata ADDED
@@ -0,0 +1,51 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: hashes
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.0.1
5
+ prerelease:
6
+ platform: ruby
7
+ authors:
8
+ - Ben Scott
9
+ autorequire:
10
+ bindir: bin
11
+ cert_chain: []
12
+ date: 2012-04-16 00:00:00.000000000 Z
13
+ dependencies: []
14
+ description: Implementations of Lookup3 and Murmur3 hash functions.
15
+ email:
16
+ - gamepoet@gmail.com
17
+ executables: []
18
+ extensions:
19
+ - ext/hashes/extconf.rb
20
+ extra_rdoc_files: []
21
+ files:
22
+ - lib/hashes.rb
23
+ - ext/hashes/hashes.c
24
+ - ext/hashes/lookup3.c
25
+ - ext/hashes/murmur3.c
26
+ - ext/hashes/extconf.rb
27
+ homepage: http://github.com/gamepoet/hashes
28
+ licenses: []
29
+ post_install_message:
30
+ rdoc_options: []
31
+ require_paths:
32
+ - lib
33
+ required_ruby_version: !ruby/object:Gem::Requirement
34
+ none: false
35
+ requirements:
36
+ - - ! '>='
37
+ - !ruby/object:Gem::Version
38
+ version: '0'
39
+ required_rubygems_version: !ruby/object:Gem::Requirement
40
+ none: false
41
+ requirements:
42
+ - - ! '>='
43
+ - !ruby/object:Gem::Version
44
+ version: '0'
45
+ requirements: []
46
+ rubyforge_project:
47
+ rubygems_version: 1.8.22
48
+ signing_key:
49
+ specification_version: 3
50
+ summary: Non-cryptographic hash functions.
51
+ test_files: []