rotoscope 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/ext/rotoscope/stack.c ADDED
@@ -0,0 +1,92 @@
+ #include "stack.h"
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include "ruby.h"
+ #include "tracepoint.h"
+
+ static void insert_root_node(rs_stack_t *stack) {
+   VALUE rb_unknown_str = rb_str_new_cstr(UNKNOWN_STR);
+   rs_tracepoint_t root_trace =
+       (rs_tracepoint_t){.event = UNKNOWN_STR,
+                         .entity = rb_str_new_cstr("<ROOT>"),
+                         .filepath = rb_unknown_str,
+                         .method_name = rb_unknown_str,
+                         .method_level = UNKNOWN_STR,
+                         .lineno = 0};
+   rs_stack_push(stack, root_trace);
+ }
+
+ static void resize_buffer(rs_stack_t *stack) {
+   unsigned int newsize = stack->capacity * 2;
+   rs_stack_frame_t *resized_contents =
+       REALLOC_N(stack->contents, rs_stack_frame_t, newsize);
+   stack->contents = resized_contents;
+   stack->capacity = newsize;
+ }
+
+ bool rs_stack_full(rs_stack_t *stack) {
+   return stack->top >= stack->capacity - 1;
+ }
+
+ bool rs_stack_empty(rs_stack_t *stack) { return stack->top < 0; }
+
+ rs_stack_frame_t rs_stack_push(rs_stack_t *stack, rs_tracepoint_t trace) {
+   if (rs_stack_full(stack)) {
+     resize_buffer(stack);
+   }
+
+   rs_stack_frame_t *caller =
+       rs_stack_empty(stack) ? NULL : rs_stack_peek(stack);
+   rs_stack_frame_t new_frame =
+       (rs_stack_frame_t){.tp = trace, .caller = caller};
+
+   stack->contents[++stack->top] = new_frame;
+   return new_frame;
+ }
+
+ rs_stack_frame_t rs_stack_pop(rs_stack_t *stack) {
+   if (rs_stack_empty(stack)) {
+     fprintf(stderr, "Stack is empty!\n");
+     exit(1);
+   }
+
+   return stack->contents[stack->top--];
+ }
+
+ rs_stack_frame_t *rs_stack_peek(rs_stack_t *stack) {
+   if (rs_stack_empty(stack)) {
+     fprintf(stderr, "Stack is empty!\n");
+     exit(1);
+   }
+
+   return &stack->contents[stack->top];
+ }
+
+ void rs_stack_reset(rs_stack_t *stack, unsigned int capacity) {
+   stack->top = -1;
+   insert_root_node(stack);
+ }
+
+ void rs_stack_free(rs_stack_t *stack) {
+   xfree(stack->contents);
+   stack->contents = NULL;
+   stack->top = -1;
+   stack->capacity = 0;
+ }
+
+ void rs_stack_init(rs_stack_t *stack, unsigned int capacity) {
+   rs_stack_frame_t *contents = ALLOC_N(rs_stack_frame_t, capacity);
+   stack->contents = contents;
+   stack->capacity = capacity;
+   stack->top = -1;
+
+   insert_root_node(stack);
+ }
+
+ void rs_stack_mark(rs_stack_t *stack) {
+   for (int i = 0; i <= stack->top; i++) {
+     rs_stack_frame_t frame = stack->contents[i];
+     rs_tracepoint_mark(&frame.tp);
+   }
+ }
data/ext/rotoscope/stack.h ADDED
@@ -0,0 +1,29 @@
+ #ifndef _INC_ROTOSCOPE_STACK_H_
+ #define _INC_ROTOSCOPE_STACK_H_
+ #include <stdbool.h>
+ #include "tracepoint.h"
+
+ #define UNKNOWN_STR "<UNKNOWN>"
+
+ typedef struct rs_stack_frame_t {
+   struct rs_tracepoint_t tp;
+   struct rs_stack_frame_t *caller;
+ } rs_stack_frame_t;
+
+ typedef struct {
+   int capacity;
+   int top;
+   rs_stack_frame_t *contents;
+ } rs_stack_t;
+
+ void rs_stack_init(rs_stack_t *stack, unsigned int capacity);
+ void rs_stack_reset(rs_stack_t *stack, unsigned int capacity);
+ void rs_stack_free(rs_stack_t *stack);
+ rs_stack_frame_t rs_stack_push(rs_stack_t *stack, rs_tracepoint_t trace);
+ bool rs_stack_empty(rs_stack_t *stack);
+ bool rs_stack_full(rs_stack_t *stack);
+ rs_stack_frame_t rs_stack_pop(rs_stack_t *stack);
+ rs_stack_frame_t *rs_stack_peek(rs_stack_t *stack);
+ void rs_stack_mark(rs_stack_t *stack);
+
+ #endif
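
For orientation, a minimal usage sketch of the shadow-stack API declared above (not part of the gem; the tracepoint field values are illustrative, and it assumes compilation inside the extension, where the Ruby C API that supplies VALUE and rb_str_new_cstr is available):

#include "ruby.h"
#include "stack.h"
#include "tracepoint.h"

static void example_shadow_stack(void) {
  rs_stack_t stack;
  rs_stack_init(&stack, 16); /* also pushes the <ROOT> sentinel frame */

  /* illustrative tracepoint values */
  rs_tracepoint_t tp = (rs_tracepoint_t){.event = "call",
                                         .entity = rb_str_new_cstr("Foo"),
                                         .filepath = rb_str_new_cstr("foo.rb"),
                                         .method_name = rb_str_new_cstr("bar"),
                                         .method_level = "instance",
                                         .lineno = 1};
  rs_stack_frame_t frame = rs_stack_push(&stack, tp);
  /* frame.caller points at the <ROOT> frame that rs_stack_init pushed */
  rs_stack_pop(&stack); /* back to <ROOT> */
  rs_stack_free(&stack);
  (void)frame;
}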
data/ext/rotoscope/strmemo.c ADDED
@@ -0,0 +1,33 @@
+ #include <ruby.h>
+ #include <stdbool.h>
+
+ #include "strmemo.h"
+ #include "uthash.h"
+
+ static unsigned long hash_djb2(unsigned char *str) {
+   unsigned long hash = 5381;
+   int c;
+   while ((c = *str++)) hash = ((hash << 5) + hash) + c;
+   return hash;
+ }
+
+ bool rs_strmemo_uniq(rs_strmemo_t **calls, unsigned char *entry) {
+   rs_strmemo_t *c = NULL;
+   unsigned long hashkey = hash_djb2(entry);
+
+   HASH_FIND_INT(*calls, &hashkey, c);
+   if (c != NULL) return false;
+
+   c = ALLOC(rs_strmemo_t);
+   c->key = hashkey;
+   HASH_ADD_INT(*calls, key, c);
+   return true;
+ }
+
+ void rs_strmemo_free(rs_strmemo_t *calls) {
+   rs_strmemo_t *current_call, *tmp;
+   HASH_ITER(hh, calls, current_call, tmp) {
+     HASH_DEL(calls, current_call);
+     xfree(current_call);
+   }
+ }
data/ext/rotoscope/strmemo.h ADDED
@@ -0,0 +1,14 @@
+ #ifndef _INC_ROTOSCOPE_STRMEMO_H_
+ #define _INC_ROTOSCOPE_STRMEMO_H_
+
+ #include "uthash.h"
+
+ typedef struct {
+   unsigned long key;
+   UT_hash_handle hh;
+ } rs_strmemo_t;
+
+ bool rs_strmemo_uniq(rs_strmemo_t **calls, unsigned char *entry);
+ void rs_strmemo_free(rs_strmemo_t *calls);
+
+ #endif
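
A small sketch of how this memo table is meant to be used — deduplicating strings by their djb2 hash (the entry text is illustrative, and ALLOC/xfree again come from the Ruby C API, as in the extension itself):

#include <stdbool.h> /* strmemo.h uses bool but does not include this itself */
#include <stdio.h>
#include "strmemo.h"

static void example_dedup(void) {
  rs_strmemo_t *seen = NULL; /* uthash heads must start out NULL */

  if (rs_strmemo_uniq(&seen, (unsigned char *)"Foo#bar")) {
    printf("first sighting, emit it\n");
  }
  if (!rs_strmemo_uniq(&seen, (unsigned char *)"Foo#bar")) {
    printf("already memoized, skip it\n");
  }
  rs_strmemo_free(seen);
}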
data/ext/rotoscope/tracepoint.c ADDED
@@ -0,0 +1,8 @@
+ #include "tracepoint.h"
+ #include "ruby.h"
+
+ void rs_tracepoint_mark(rs_tracepoint_t *tracepoint) {
+   rb_gc_mark(tracepoint->entity);
+   rb_gc_mark(tracepoint->filepath);
+   rb_gc_mark(tracepoint->method_name);
+ }
data/ext/rotoscope/tracepoint.h ADDED
@@ -0,0 +1,17 @@
+ #ifndef _INC_ROTOSCOPE_TRACEPOINT_H_
+ #define _INC_ROTOSCOPE_TRACEPOINT_H_
+
+ #include "ruby.h"
+
+ typedef struct rs_tracepoint_t {
+   const char *event;
+   VALUE entity;
+   VALUE filepath;
+   VALUE method_name;
+   const char *method_level;
+   unsigned int lineno;
+ } rs_tracepoint_t;
+
+ void rs_tracepoint_mark(rs_tracepoint_t *tracepoint);
+
+ #endif
data/lib/rotoscope.rb ADDED
@@ -0,0 +1,71 @@
+ # frozen_string_literal: true
+ require 'rotoscope/rotoscope'
+ require 'fileutils'
+ require 'tempfile'
+ require 'csv'
+
+ class Rotoscope
+   class << self
+     def new(output_path, blacklist: [], flatten: false)
+       super(output_path, blacklist, flatten)
+     end
+
+     def trace(dest, blacklist: [], flatten: false, &block)
+       config = { blacklist: blacklist, flatten: flatten }
+       if dest.is_a?(String)
+         event_trace(dest, config, &block)
+       else
+         io_event_trace(dest, config, &block)
+       end
+     end
+
+     private
+
+     def with_temp_file(name)
+       temp_file = Tempfile.new(name)
+       yield temp_file
+     ensure
+       temp_file.close! if temp_file
+     end
+
+     def temp_event_trace(config, block)
+       with_temp_file("rotoscope_output") do |temp_file|
+         rs = event_trace(temp_file.path, config, &block)
+         yield rs
+         rs
+       end
+     end
+
+     def io_event_trace(dest_io, config, &block)
+       temp_event_trace(config, block) do |rs|
+         File.open(rs.log_path) do |rs_file|
+           IO.copy_stream(rs_file, dest_io)
+         end
+       end
+     end
+
+     def event_trace(dest_path, config)
+       rs = Rotoscope.new(dest_path, blacklist: config[:blacklist], flatten: config[:flatten])
+       rs.trace { yield rs }
+       rs
+     ensure
+       rs.close if rs
+     end
+   end
+
+   def closed?
+     state == :closed
+   end
+
+   def inspect
+     "Rotoscope(state: #{state}, log_path: \"#{short_log_path}\", object_id: #{format('0x00%x', object_id << 1)})"
+   end
+
+   private
+
+   def short_log_path
+     return log_path if log_path.length < 40
+     chars = log_path.chars
+     "#{chars.first(17).join('')}...#{chars.last(20).join('')}"
+   end
+ end
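
A minimal usage sketch of the Rotoscope.trace API defined above (the output path and traced code are illustrative only):

require 'rotoscope'

# trace into a file path...
Rotoscope.trace('/tmp/trace.csv') do
  'hello world'.upcase
end

# ...or into an IO object, optionally flattening call/return pairs
File.open('/tmp/trace.csv', 'w') do |io|
  Rotoscope.trace(io, flatten: true) { 'hello world'.upcase }
end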
data/ext/rotoscope/uthash.h ADDED
@@ -0,0 +1,1107 @@
+ /*
+ Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ #ifndef UTHASH_H
+ #define UTHASH_H
+
+ #define UTHASH_VERSION 2.0.2
+
+ #include <string.h> /* memcmp, memset, strlen */
+ #include <stddef.h> /* ptrdiff_t */
+ #include <stdlib.h> /* exit */
+
+ /* These macros use decltype or the earlier __typeof GNU extension.
+ As decltype is only available in newer compilers (VS2010 or gcc 4.3+
+ when compiling c++ source) this code uses whatever method is needed
+ or, for VS2008 where neither is available, uses casting workarounds. */
+ #if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
+ #if defined(_MSC_VER) /* MS compiler */
+ #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
+ #define DECLTYPE(x) (decltype(x))
+ #else /* VS2008 or older (or VS2010 in C mode) */
+ #define NO_DECLTYPE
+ #endif
+ #elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
+ #define NO_DECLTYPE
+ #else /* GNU, Sun and other compilers */
+ #define DECLTYPE(x) (__typeof(x))
+ #endif
+ #endif
+
+ #ifdef NO_DECLTYPE
+ #define DECLTYPE(x)
+ #define DECLTYPE_ASSIGN(dst,src) \
+ do { \
+ char **_da_dst = (char**)(&(dst)); \
+ *_da_dst = (char*)(src); \
+ } while (0)
+ #else
+ #define DECLTYPE_ASSIGN(dst,src) \
+ do { \
+ (dst) = DECLTYPE(dst)(src); \
+ } while (0)
+ #endif
+
+ /* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */
+ #if defined(_WIN32)
+ #if defined(_MSC_VER) && _MSC_VER >= 1600
+ #include <stdint.h>
+ #elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
+ #include <stdint.h>
+ #else
+ typedef unsigned int uint32_t;
+ typedef unsigned char uint8_t;
+ #endif
+ #elif defined(__GNUC__) && !defined(__VXWORKS__)
+ #include <stdint.h>
+ #else
+ typedef unsigned int uint32_t;
+ typedef unsigned char uint8_t;
+ #endif
+
+ #ifndef uthash_fatal
+ #define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
+ #endif
+ #ifndef uthash_malloc
+ #define uthash_malloc(sz) malloc(sz) /* malloc fcn */
+ #endif
+ #ifndef uthash_free
+ #define uthash_free(ptr,sz) free(ptr) /* free fcn */
+ #endif
+ #ifndef uthash_bzero
+ #define uthash_bzero(a,n) memset(a,'\0',n)
+ #endif
+ #ifndef uthash_memcmp
+ #define uthash_memcmp(a,b,n) memcmp(a,b,n)
+ #endif
+ #ifndef uthash_strlen
+ #define uthash_strlen(s) strlen(s)
+ #endif
+
+ #ifndef uthash_noexpand_fyi
+ #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
+ #endif
+ #ifndef uthash_expand_fyi
+ #define uthash_expand_fyi(tbl) /* can be defined to log expands */
+ #endif
+
+ /* initial number of buckets */
+ #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
+ #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
+ #define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */
+
+ /* calculate the element whose hash handle address is hhp */
+ #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
+ /* calculate the hash handle from element address elp */
+ #define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
+
+ #define HASH_VALUE(keyptr,keylen,hashv) \
+ do { \
+ HASH_FCN(keyptr, keylen, hashv); \
+ } while (0)
+
+ #define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
+ do { \
+ (out) = NULL; \
+ if (head) { \
+ unsigned _hf_bkt; \
+ HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
+ if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
+ HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
+ } \
+ } \
+ } while (0)
+
+ #define HASH_FIND(hh,head,keyptr,keylen,out) \
+ do { \
+ unsigned _hf_hashv; \
+ HASH_VALUE(keyptr, keylen, _hf_hashv); \
+ HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
+ } while (0)
+
+ #ifdef HASH_BLOOM
+ #define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
+ #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
+ #define HASH_BLOOM_MAKE(tbl) \
+ do { \
+ (tbl)->bloom_nbits = HASH_BLOOM; \
+ (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
+ if (!(tbl)->bloom_bv) { \
+ uthash_fatal("out of memory"); \
+ } \
+ uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
+ (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
+ } while (0)
+
+ #define HASH_BLOOM_FREE(tbl) \
+ do { \
+ uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
+ } while (0)
+
+ #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
+ #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
+
+ #define HASH_BLOOM_ADD(tbl,hashv) \
+ HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
+
+ #define HASH_BLOOM_TEST(tbl,hashv) \
+ HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
+
+ #else
+ #define HASH_BLOOM_MAKE(tbl)
+ #define HASH_BLOOM_FREE(tbl)
+ #define HASH_BLOOM_ADD(tbl,hashv)
+ #define HASH_BLOOM_TEST(tbl,hashv) (1)
+ #define HASH_BLOOM_BYTELEN 0U
+ #endif
+
+ #define HASH_MAKE_TABLE(hh,head) \
+ do { \
+ (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
+ if (!(head)->hh.tbl) { \
+ uthash_fatal("out of memory"); \
+ } \
+ uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
+ (head)->hh.tbl->tail = &((head)->hh); \
+ (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
+ (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
+ (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
+ (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
+ HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
+ if (!(head)->hh.tbl->buckets) { \
+ uthash_fatal("out of memory"); \
+ } \
+ uthash_bzero((head)->hh.tbl->buckets, \
+ HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
+ HASH_BLOOM_MAKE((head)->hh.tbl); \
+ (head)->hh.tbl->signature = HASH_SIGNATURE; \
+ } while (0)
+
+ #define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
+ do { \
+ (replaced) = NULL; \
+ HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
+ if (replaced) { \
+ HASH_DELETE(hh, head, replaced); \
+ } \
+ HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
+ } while (0)
+
+ #define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
+ do { \
+ (replaced) = NULL; \
+ HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
+ if (replaced) { \
+ HASH_DELETE(hh, head, replaced); \
+ } \
+ HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
+ } while (0)
+
+ #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
+ do { \
+ unsigned _hr_hashv; \
+ HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
+ HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
+ } while (0)
+
+ #define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
+ do { \
+ unsigned _hr_hashv; \
+ HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
+ HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
+ } while (0)
+
+ #define HASH_APPEND_LIST(hh, head, add) \
+ do { \
+ (add)->hh.next = NULL; \
+ (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
+ (head)->hh.tbl->tail->next = (add); \
+ (head)->hh.tbl->tail = &((add)->hh); \
+ } while (0)
+
+ #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
+ do { \
+ do { \
+ if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
+ break; \
+ } \
+ } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
+ } while (0)
+
+ #ifdef NO_DECLTYPE
+ #undef HASH_AKBI_INNER_LOOP
+ #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
+ do { \
+ char *_hs_saved_head = (char*)(head); \
+ do { \
+ DECLTYPE_ASSIGN(head, _hs_iter); \
+ if (cmpfcn(head, add) > 0) { \
+ DECLTYPE_ASSIGN(head, _hs_saved_head); \
+ break; \
+ } \
+ DECLTYPE_ASSIGN(head, _hs_saved_head); \
+ } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
+ } while (0)
+ #endif
+
+ #define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
+ do { \
+ unsigned _ha_bkt; \
+ (add)->hh.hashv = (hashval); \
+ (add)->hh.key = (char*) (keyptr); \
+ (add)->hh.keylen = (unsigned) (keylen_in); \
+ if (!(head)) { \
+ (add)->hh.next = NULL; \
+ (add)->hh.prev = NULL; \
+ (head) = (add); \
+ HASH_MAKE_TABLE(hh, head); \
+ } else { \
+ void *_hs_iter = (head); \
+ (add)->hh.tbl = (head)->hh.tbl; \
+ HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
+ if (_hs_iter) { \
+ (add)->hh.next = _hs_iter; \
+ if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
+ HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
+ } else { \
+ (head) = (add); \
+ } \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
+ } else { \
+ HASH_APPEND_LIST(hh, head, add); \
+ } \
+ } \
+ (head)->hh.tbl->num_items++; \
+ HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
+ HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
+ HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
+ HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
+ HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
+ } while (0)
+
+ #define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
+ do { \
+ unsigned _hs_hashv; \
+ HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
+ HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
+ } while (0)
+
+ #define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
+ HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)
+
+ #define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
+ HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
+
+ #define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
+ do { \
+ unsigned _ha_bkt; \
+ (add)->hh.hashv = (hashval); \
+ (add)->hh.key = (char*) (keyptr); \
+ (add)->hh.keylen = (unsigned) (keylen_in); \
+ if (!(head)) { \
+ (add)->hh.next = NULL; \
+ (add)->hh.prev = NULL; \
+ (head) = (add); \
+ HASH_MAKE_TABLE(hh, head); \
+ } else { \
+ (add)->hh.tbl = (head)->hh.tbl; \
+ HASH_APPEND_LIST(hh, head, add); \
+ } \
+ (head)->hh.tbl->num_items++; \
+ HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
+ HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
+ HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
+ HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
+ HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
+ } while (0)
+
+ #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
+ do { \
+ unsigned _ha_hashv; \
+ HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
+ HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
+ } while (0)
+
+ #define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
+ HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)
+
+ #define HASH_ADD(hh,head,fieldname,keylen_in,add) \
+ HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
+
+ #define HASH_TO_BKT(hashv,num_bkts,bkt) \
+ do { \
+ bkt = ((hashv) & ((num_bkts) - 1U)); \
+ } while (0)
+
+ /* delete "delptr" from the hash table.
+ * "the usual" patch-up process for the app-order doubly-linked-list.
+ * The use of _hd_hh_del below deserves special explanation.
+ * These used to be expressed using (delptr) but that led to a bug
+ * if someone used the same symbol for the head and deletee, like
+ * HASH_DELETE(hh,users,users);
+ * We want that to work, but by changing the head (users) below
+ * we were forfeiting our ability to further refer to the deletee (users)
+ * in the patch-up process. Solution: use scratch space to
+ * copy the deletee pointer, then the latter references are via that
+ * scratch pointer rather than through the repointed (users) symbol.
+ */
+ #define HASH_DELETE(hh,head,delptr) \
+ HASH_DELETE_HH(hh, head, &(delptr)->hh)
+
+ #define HASH_DELETE_HH(hh,head,delptrhh) \
+ do { \
+ struct UT_hash_handle *_hd_hh_del = (delptrhh); \
+ if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
+ HASH_BLOOM_FREE((head)->hh.tbl); \
+ uthash_free((head)->hh.tbl->buckets, \
+ (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
+ uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
+ (head) = NULL; \
+ } else { \
+ unsigned _hd_bkt; \
+ if (_hd_hh_del == (head)->hh.tbl->tail) { \
+ (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
+ } \
+ if (_hd_hh_del->prev != NULL) { \
+ HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
+ } else { \
+ DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
+ } \
+ if (_hd_hh_del->next != NULL) { \
+ HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
+ } \
+ HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
+ HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
+ (head)->hh.tbl->num_items--; \
+ } \
+ HASH_FSCK(hh, head, "HASH_DELETE"); \
+ } while (0)
+
+
+ /* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
+ #define HASH_FIND_STR(head,findstr,out) \
+ HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
+ #define HASH_ADD_STR(head,strfield,add) \
+ HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
+ #define HASH_REPLACE_STR(head,strfield,add,replaced) \
+ HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
+ #define HASH_FIND_INT(head,findint,out) \
+ HASH_FIND(hh,head,findint,sizeof(int),out)
+ #define HASH_ADD_INT(head,intfield,add) \
+ HASH_ADD(hh,head,intfield,sizeof(int),add)
+ #define HASH_REPLACE_INT(head,intfield,add,replaced) \
+ HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
+ #define HASH_FIND_PTR(head,findptr,out) \
+ HASH_FIND(hh,head,findptr,sizeof(void *),out)
+ #define HASH_ADD_PTR(head,ptrfield,add) \
+ HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
+ #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
+ HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
+ #define HASH_DEL(head,delptr) \
+ HASH_DELETE(hh,head,delptr)
+
+ /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
+ * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
+ */
+ #ifdef HASH_DEBUG
+ #define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
+ #define HASH_FSCK(hh,head,where) \
+ do { \
+ struct UT_hash_handle *_thh; \
+ if (head) { \
+ unsigned _bkt_i; \
+ unsigned _count = 0; \
+ char *_prev; \
+ for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
+ unsigned _bkt_count = 0; \
+ _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
+ _prev = NULL; \
+ while (_thh) { \
+ if (_prev != (char*)(_thh->hh_prev)) { \
+ HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
+ (where), (void*)_thh->hh_prev, (void*)_prev); \
+ } \
+ _bkt_count++; \
+ _prev = (char*)(_thh); \
+ _thh = _thh->hh_next; \
+ } \
+ _count += _bkt_count; \
+ if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
+ HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
+ (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
+ } \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
+ (where), (head)->hh.tbl->num_items, _count); \
+ } \
+ _count = 0; \
+ _prev = NULL; \
+ _thh = &(head)->hh; \
+ while (_thh) { \
+ _count++; \
+ if (_prev != (char*)_thh->prev) { \
+ HASH_OOPS("%s: invalid prev %p, actual %p\n", \
+ (where), (void*)_thh->prev, (void*)_prev); \
+ } \
+ _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
+ _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
+ (where), (head)->hh.tbl->num_items, _count); \
+ } \
+ } \
+ } while (0)
+ #else
+ #define HASH_FSCK(hh,head,where)
+ #endif
+
+ /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
+ * the descriptor to which this macro is defined for tuning the hash function.
+ * The app can #include <unistd.h> to get the prototype for write(2). */
+ #ifdef HASH_EMIT_KEYS
+ #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
+ do { \
+ unsigned _klen = fieldlen; \
+ write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
+ write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
+ } while (0)
+ #else
+ #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
+ #endif
+
+ /* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */
+ #ifdef HASH_FUNCTION
+ #define HASH_FCN HASH_FUNCTION
+ #else
+ #define HASH_FCN HASH_JEN
+ #endif
+
+ /* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */
+ #define HASH_BER(key,keylen,hashv) \
+ do { \
+ unsigned _hb_keylen = (unsigned)keylen; \
+ const unsigned char *_hb_key = (const unsigned char*)(key); \
+ (hashv) = 0; \
+ while (_hb_keylen-- != 0U) { \
+ (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
+ } \
+ } while (0)
+
+
+ /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
+ * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
+ #define HASH_SAX(key,keylen,hashv) \
+ do { \
+ unsigned _sx_i; \
+ const unsigned char *_hs_key = (const unsigned char*)(key); \
+ hashv = 0; \
+ for (_sx_i=0; _sx_i < keylen; _sx_i++) { \
+ hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
+ } \
+ } while (0)
+ /* FNV-1a variation */
+ #define HASH_FNV(key,keylen,hashv) \
+ do { \
+ unsigned _fn_i; \
+ const unsigned char *_hf_key = (const unsigned char*)(key); \
+ (hashv) = 2166136261U; \
+ for (_fn_i=0; _fn_i < keylen; _fn_i++) { \
+ hashv = hashv ^ _hf_key[_fn_i]; \
+ hashv = hashv * 16777619U; \
+ } \
+ } while (0)
+
+ #define HASH_OAT(key,keylen,hashv) \
+ do { \
+ unsigned _ho_i; \
+ const unsigned char *_ho_key=(const unsigned char*)(key); \
+ hashv = 0; \
+ for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
+ hashv += _ho_key[_ho_i]; \
+ hashv += (hashv << 10); \
+ hashv ^= (hashv >> 6); \
+ } \
+ hashv += (hashv << 3); \
+ hashv ^= (hashv >> 11); \
+ hashv += (hashv << 15); \
+ } while (0)
+
+ #define HASH_JEN_MIX(a,b,c) \
+ do { \
+ a -= b; a -= c; a ^= ( c >> 13 ); \
+ b -= c; b -= a; b ^= ( a << 8 ); \
+ c -= a; c -= b; c ^= ( b >> 13 ); \
+ a -= b; a -= c; a ^= ( c >> 12 ); \
+ b -= c; b -= a; b ^= ( a << 16 ); \
+ c -= a; c -= b; c ^= ( b >> 5 ); \
+ a -= b; a -= c; a ^= ( c >> 3 ); \
+ b -= c; b -= a; b ^= ( a << 10 ); \
+ c -= a; c -= b; c ^= ( b >> 15 ); \
+ } while (0)
+
+ #define HASH_JEN(key,keylen,hashv) \
+ do { \
+ unsigned _hj_i,_hj_j,_hj_k; \
+ unsigned const char *_hj_key=(unsigned const char*)(key); \
+ hashv = 0xfeedbeefu; \
+ _hj_i = _hj_j = 0x9e3779b9u; \
+ _hj_k = (unsigned)(keylen); \
+ while (_hj_k >= 12U) { \
+ _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
+ + ( (unsigned)_hj_key[2] << 16 ) \
+ + ( (unsigned)_hj_key[3] << 24 ) ); \
+ _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
+ + ( (unsigned)_hj_key[6] << 16 ) \
+ + ( (unsigned)_hj_key[7] << 24 ) ); \
+ hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
+ + ( (unsigned)_hj_key[10] << 16 ) \
+ + ( (unsigned)_hj_key[11] << 24 ) ); \
+ \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ \
+ _hj_key += 12; \
+ _hj_k -= 12U; \
+ } \
+ hashv += (unsigned)(keylen); \
+ switch ( _hj_k ) { \
+ case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
+ case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
+ case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
+ case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
+ case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
+ case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
+ case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
+ case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
+ case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
+ case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
+ case 1: _hj_i += _hj_key[0]; \
+ } \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ } while (0)
+
+ /* The Paul Hsieh hash function */
+ #undef get16bits
+ #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+ #define get16bits(d) (*((const uint16_t *) (d)))
+ #endif
+
+ #if !defined (get16bits)
+ #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+ #endif
+ #define HASH_SFH(key,keylen,hashv) \
+ do { \
+ unsigned const char *_sfh_key=(unsigned const char*)(key); \
+ uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
+ \
+ unsigned _sfh_rem = _sfh_len & 3U; \
+ _sfh_len >>= 2; \
+ hashv = 0xcafebabeu; \
+ \
+ /* Main loop */ \
+ for (;_sfh_len > 0U; _sfh_len--) { \
+ hashv += get16bits (_sfh_key); \
+ _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
+ hashv = (hashv << 16) ^ _sfh_tmp; \
+ _sfh_key += 2U*sizeof (uint16_t); \
+ hashv += hashv >> 11; \
+ } \
+ \
+ /* Handle end cases */ \
+ switch (_sfh_rem) { \
+ case 3: hashv += get16bits (_sfh_key); \
+ hashv ^= hashv << 16; \
+ hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
+ hashv += hashv >> 11; \
+ break; \
+ case 2: hashv += get16bits (_sfh_key); \
+ hashv ^= hashv << 11; \
+ hashv += hashv >> 17; \
+ break; \
+ case 1: hashv += *_sfh_key; \
+ hashv ^= hashv << 10; \
+ hashv += hashv >> 1; \
+ } \
+ \
+ /* Force "avalanching" of final 127 bits */ \
+ hashv ^= hashv << 3; \
+ hashv += hashv >> 5; \
+ hashv ^= hashv << 4; \
+ hashv += hashv >> 17; \
+ hashv ^= hashv << 25; \
+ hashv += hashv >> 6; \
+ } while (0)
+
+ #ifdef HASH_USING_NO_STRICT_ALIASING
+ /* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
+ * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
+ * MurmurHash uses the faster approach only on CPU's where we know it's safe.
+ *
+ * Note the preprocessor built-in defines can be emitted using:
+ *
+ * gcc -m64 -dM -E - < /dev/null (on gcc)
+ * cc -## a.c (where a.c is a simple test file) (Sun Studio)
+ */
+ #if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
+ #define MUR_GETBLOCK(p,i) p[i]
+ #else /* non intel */
+ #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
+ #define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
+ #define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
+ #define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
+ #define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
+ #if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
+ #define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
+ #define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
+ #define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
+ #else /* assume little endian non-intel */
+ #define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
+ #define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
+ #define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
+ #endif
+ #define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
+ (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
+ (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
+ MUR_ONE_THREE(p))))
+ #endif
+ #define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+ #define MUR_FMIX(_h) \
+ do { \
+ _h ^= _h >> 16; \
+ _h *= 0x85ebca6bu; \
+ _h ^= _h >> 13; \
+ _h *= 0xc2b2ae35u; \
+ _h ^= _h >> 16; \
+ } while (0)
+
+ #define HASH_MUR(key,keylen,hashv) \
+ do { \
+ const uint8_t *_mur_data = (const uint8_t*)(key); \
+ const int _mur_nblocks = (int)(keylen) / 4; \
+ uint32_t _mur_h1 = 0xf88D5353u; \
+ uint32_t _mur_c1 = 0xcc9e2d51u; \
+ uint32_t _mur_c2 = 0x1b873593u; \
+ uint32_t _mur_k1 = 0; \
+ const uint8_t *_mur_tail; \
+ const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
+ int _mur_i; \
+ for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) { \
+ _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
+ _mur_k1 *= _mur_c1; \
+ _mur_k1 = MUR_ROTL32(_mur_k1,15); \
+ _mur_k1 *= _mur_c2; \
+ \
+ _mur_h1 ^= _mur_k1; \
+ _mur_h1 = MUR_ROTL32(_mur_h1,13); \
+ _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
+ } \
+ _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
+ _mur_k1=0; \
+ switch ((keylen) & 3U) { \
+ case 0: break; \
+ case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
+ case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
+ case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
+ _mur_k1 *= _mur_c1; \
+ _mur_k1 = MUR_ROTL32(_mur_k1,15); \
+ _mur_k1 *= _mur_c2; \
+ _mur_h1 ^= _mur_k1; \
+ } \
+ _mur_h1 ^= (uint32_t)(keylen); \
+ MUR_FMIX(_mur_h1); \
+ hashv = _mur_h1; \
+ } while (0)
+ #endif /* HASH_USING_NO_STRICT_ALIASING */
+
+ /* iterate over items in a known bucket to find desired item */
+ #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
+ do { \
+ if ((head).hh_head != NULL) { \
+ DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
+ } else { \
+ (out) = NULL; \
+ } \
+ while ((out) != NULL) { \
+ if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
+ if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
+ break; \
+ } \
+ } \
+ if ((out)->hh.hh_next != NULL) { \
+ DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
+ } else { \
+ (out) = NULL; \
+ } \
+ } \
+ } while (0)
+
+ /* add an item to a bucket */
+ #define HASH_ADD_TO_BKT(head,addhh) \
+ do { \
+ UT_hash_bucket *_ha_head = &(head); \
+ _ha_head->count++; \
+ (addhh)->hh_next = _ha_head->hh_head; \
+ (addhh)->hh_prev = NULL; \
+ if (_ha_head->hh_head != NULL) { \
+ _ha_head->hh_head->hh_prev = (addhh); \
+ } \
+ _ha_head->hh_head = (addhh); \
+ if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
+ && !(addhh)->tbl->noexpand) { \
+ HASH_EXPAND_BUCKETS((addhh)->tbl); \
+ } \
+ } while (0)
+
+ /* remove an item from a given bucket */
+ #define HASH_DEL_IN_BKT(head,delhh) \
+ do { \
+ UT_hash_bucket *_hd_head = &(head); \
+ _hd_head->count--; \
+ if (_hd_head->hh_head == (delhh)) { \
+ _hd_head->hh_head = (delhh)->hh_next; \
+ } \
+ if ((delhh)->hh_prev) { \
+ (delhh)->hh_prev->hh_next = (delhh)->hh_next; \
+ } \
+ if ((delhh)->hh_next) { \
+ (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \
+ } \
+ } while (0)
+
+ /* Bucket expansion has the effect of doubling the number of buckets
+ * and redistributing the items into the new buckets. Ideally the
+ * items will distribute more or less evenly into the new buckets
+ * (the extent to which this is true is a measure of the quality of
+ * the hash function as it applies to the key domain).
+ *
+ * With the items distributed into more buckets, the chain length
+ * (item count) in each bucket is reduced. Thus by expanding buckets
+ * the hash keeps a bound on the chain length. This bounded chain
+ * length is the essence of how a hash provides constant time lookup.
+ *
+ * The calculation of tbl->ideal_chain_maxlen below deserves some
+ * explanation. First, keep in mind that we're calculating the ideal
+ * maximum chain length based on the *new* (doubled) bucket count.
+ * In fractions this is just n/b (n=number of items,b=new num buckets).
+ * Since the ideal chain length is an integer, we want to calculate
+ * ceil(n/b). We don't depend on floating point arithmetic in this
+ * hash, so to calculate ceil(n/b) with integers we could write
+ *
+ * ceil(n/b) = (n/b) + ((n%b)?1:0)
+ *
+ * and in fact a previous version of this hash did just that.
+ * But now we have improved things a bit by recognizing that b is
+ * always a power of two. We keep its base 2 log handy (call it lb),
+ * so now we can write this with a bit shift and logical AND:
+ *
+ * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
+ *
+ */
+ #define HASH_EXPAND_BUCKETS(tbl) \
+ do { \
+ unsigned _he_bkt; \
+ unsigned _he_bkt_i; \
+ struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
+ UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
+ _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
+ 2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
+ if (!_he_new_buckets) { \
+ uthash_fatal("out of memory"); \
+ } \
+ uthash_bzero(_he_new_buckets, \
+ 2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
+ (tbl)->ideal_chain_maxlen = \
+ ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \
+ ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
+ (tbl)->nonideal_items = 0; \
+ for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \
+ _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \
+ while (_he_thh != NULL) { \
+ _he_hh_nxt = _he_thh->hh_next; \
+ HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \
+ _he_newbkt = &(_he_new_buckets[_he_bkt]); \
+ if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \
+ (tbl)->nonideal_items++; \
+ _he_newbkt->expand_mult = _he_newbkt->count / (tbl)->ideal_chain_maxlen; \
+ } \
+ _he_thh->hh_prev = NULL; \
+ _he_thh->hh_next = _he_newbkt->hh_head; \
+ if (_he_newbkt->hh_head != NULL) { \
+ _he_newbkt->hh_head->hh_prev = _he_thh; \
+ } \
+ _he_newbkt->hh_head = _he_thh; \
+ _he_thh = _he_hh_nxt; \
+ } \
+ } \
+ uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
+ (tbl)->num_buckets *= 2U; \
+ (tbl)->log2_num_buckets++; \
+ (tbl)->buckets = _he_new_buckets; \
+ (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \
+ ((tbl)->ineff_expands+1U) : 0U; \
+ if ((tbl)->ineff_expands > 1U) { \
+ (tbl)->noexpand = 1; \
+ uthash_noexpand_fyi(tbl); \
+ } \
+ uthash_expand_fyi(tbl); \
+ } while (0)
+
+
+ /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
+ /* Note that HASH_SORT assumes the hash handle name to be hh.
+ * HASH_SRT was added to allow the hash handle name to be passed in. */
+ #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
+ #define HASH_SRT(hh,head,cmpfcn) \
+ do { \
+ unsigned _hs_i; \
+ unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
+ struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
+ if (head != NULL) { \
+ _hs_insize = 1; \
+ _hs_looping = 1; \
+ _hs_list = &((head)->hh); \
+ while (_hs_looping != 0U) { \
+ _hs_p = _hs_list; \
+ _hs_list = NULL; \
+ _hs_tail = NULL; \
+ _hs_nmerges = 0; \
+ while (_hs_p != NULL) { \
+ _hs_nmerges++; \
+ _hs_q = _hs_p; \
+ _hs_psize = 0; \
+ for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \
+ _hs_psize++; \
+ _hs_q = ((_hs_q->next != NULL) ? \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
+ if (_hs_q == NULL) { \
+ break; \
+ } \
+ } \
+ _hs_qsize = _hs_insize; \
+ while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \
+ if (_hs_psize == 0U) { \
+ _hs_e = _hs_q; \
+ _hs_q = ((_hs_q->next != NULL) ? \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
+ _hs_qsize--; \
+ } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \
+ _hs_e = _hs_p; \
+ if (_hs_p != NULL) { \
+ _hs_p = ((_hs_p->next != NULL) ? \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
+ } \
+ _hs_psize--; \
+ } else if ((cmpfcn( \
+ DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \
+ DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \
+ )) <= 0) { \
+ _hs_e = _hs_p; \
+ if (_hs_p != NULL) { \
+ _hs_p = ((_hs_p->next != NULL) ? \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
+ } \
+ _hs_psize--; \
+ } else { \
+ _hs_e = _hs_q; \
+ _hs_q = ((_hs_q->next != NULL) ? \
+ HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
+ _hs_qsize--; \
+ } \
+ if ( _hs_tail != NULL ) { \
+ _hs_tail->next = ((_hs_e != NULL) ? \
+ ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \
+ } else { \
+ _hs_list = _hs_e; \
+ } \
+ if (_hs_e != NULL) { \
+ _hs_e->prev = ((_hs_tail != NULL) ? \
+ ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \
+ } \
+ _hs_tail = _hs_e; \
+ } \
+ _hs_p = _hs_q; \
+ } \
+ if (_hs_tail != NULL) { \
+ _hs_tail->next = NULL; \
+ } \
+ if (_hs_nmerges <= 1U) { \
+ _hs_looping = 0; \
+ (head)->hh.tbl->tail = _hs_tail; \
+ DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
+ } \
+ _hs_insize *= 2U; \
+ } \
+ HASH_FSCK(hh, head, "HASH_SRT"); \
+ } \
+ } while (0)
+
+ /* This function selects items from one hash into another hash.
+ * The end result is that the selected items have dual presence
+ * in both hashes. There is no copy of the items made; rather
+ * they are added into the new hash through a secondary hash
+ * hash handle that must be present in the structure. */
+ #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
+ do { \
+ unsigned _src_bkt, _dst_bkt; \
+ void *_last_elt = NULL, *_elt; \
+ UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
+ ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
+ if ((src) != NULL) { \
+ for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
+ for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
+ _src_hh != NULL; \
+ _src_hh = _src_hh->hh_next) { \
+ _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
+ if (cond(_elt)) { \
+ _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
+ _dst_hh->key = _src_hh->key; \
+ _dst_hh->keylen = _src_hh->keylen; \
+ _dst_hh->hashv = _src_hh->hashv; \
+ _dst_hh->prev = _last_elt; \
+ _dst_hh->next = NULL; \
+ if (_last_elt_hh != NULL) { \
+ _last_elt_hh->next = _elt; \
+ } \
+ if ((dst) == NULL) { \
+ DECLTYPE_ASSIGN(dst, _elt); \
+ HASH_MAKE_TABLE(hh_dst, dst); \
+ } else { \
+ _dst_hh->tbl = (dst)->hh_dst.tbl; \
+ } \
+ HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
+ HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], _dst_hh); \
+ HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \
+ (dst)->hh_dst.tbl->num_items++; \
+ _last_elt = _elt; \
+ _last_elt_hh = _dst_hh; \
+ } \
+ } \
+ } \
+ } \
+ HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \
+ } while (0)
+
+ #define HASH_CLEAR(hh,head) \
+ do { \
+ if ((head) != NULL) { \
+ HASH_BLOOM_FREE((head)->hh.tbl); \
+ uthash_free((head)->hh.tbl->buckets, \
+ (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
+ uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
+ (head) = NULL; \
+ } \
+ } while (0)
+
+ #define HASH_OVERHEAD(hh,head) \
+ (((head) != NULL) ? ( \
+ (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \
+ ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
+ sizeof(UT_hash_table) + \
+ (HASH_BLOOM_BYTELEN))) : 0U)
+
+ #ifdef NO_DECLTYPE
+ #define HASH_ITER(hh,head,el,tmp) \
+ for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
+ (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
+ #else
+ #define HASH_ITER(hh,head,el,tmp) \
+ for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
+ (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
+ #endif
+
+ /* obtain a count of items in the hash */
+ #define HASH_COUNT(head) HASH_CNT(hh,head)
+ #define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
+
+ typedef struct UT_hash_bucket {
+ struct UT_hash_handle *hh_head;
+ unsigned count;
+
+ /* expand_mult is normally set to 0. In this situation, the max chain length
+ * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
+ * the bucket's chain exceeds this length, bucket expansion is triggered).
+ * However, setting expand_mult to a non-zero value delays bucket expansion
+ * (that would be triggered by additions to this particular bucket)
+ * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
+ * (The multiplier is simply expand_mult+1). The whole idea of this
+ * multiplier is to reduce bucket expansions, since they are expensive, in
+ * situations where we know that a particular bucket tends to be overused.
+ * It is better to let its chain length grow to a longer yet-still-bounded
+ * value, than to do an O(n) bucket expansion too often.
+ */
+ unsigned expand_mult;
+
+ } UT_hash_bucket;
+
+ /* random signature used only to find hash tables in external analysis */
+ #define HASH_SIGNATURE 0xa0111fe1u
+ #define HASH_BLOOM_SIGNATURE 0xb12220f2u
+
+ typedef struct UT_hash_table {
+ UT_hash_bucket *buckets;
+ unsigned num_buckets, log2_num_buckets;
+ unsigned num_items;
+ struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
+ ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
+
+ /* in an ideal situation (all buckets used equally), no bucket would have
+ * more than ceil(#items/#buckets) items. that's the ideal chain length. */
+ unsigned ideal_chain_maxlen;
+
+ /* nonideal_items is the number of items in the hash whose chain position
+ * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
+ * hash distribution; reaching them in a chain traversal takes >ideal steps */
+ unsigned nonideal_items;
+
+ /* ineffective expands occur when a bucket doubling was performed, but
+ * afterward, more than half the items in the hash had nonideal chain
+ * positions. If this happens on two consecutive expansions we inhibit any
+ * further expansion, as it's not helping; this happens when the hash
+ * function isn't a good fit for the key domain. When expansion is inhibited
+ * the hash will still work, albeit no longer in constant time. */
+ unsigned ineff_expands, noexpand;
+
+ uint32_t signature; /* used only to find hash tables in external analysis */
+ #ifdef HASH_BLOOM
+ uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
+ uint8_t *bloom_bv;
+ uint8_t bloom_nbits;
+ #endif
+
+ } UT_hash_table;
+
+ typedef struct UT_hash_handle {
+ struct UT_hash_table *tbl;
+ void *prev; /* prev element in app order */
+ void *next; /* next element in app order */
+ struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
+ struct UT_hash_handle *hh_next; /* next hh in bucket order */
+ void *key; /* ptr to enclosing struct's key */
+ unsigned keylen; /* enclosing struct's key len */
+ unsigned hashv; /* result of hash-fcn(key) */
+ } UT_hash_handle;
+
+ #endif /* UTHASH_H */
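
Finally, a self-contained sketch of the vendored uthash API above (not part of the gem), showing the HASH_ADD_INT/HASH_FIND_INT/HASH_ITER macros that strmemo.c builds on:

#include <stdio.h>
#include <stdlib.h>
#include "uthash.h"

typedef struct {
  int id;            /* key */
  const char *name;  /* payload */
  UT_hash_handle hh; /* makes this structure hashable */
} user_t;

int main(void) {
  user_t *users = NULL; /* head pointer must start out NULL */

  user_t *u = (user_t *)malloc(sizeof(*u));
  u->id = 42;
  u->name = "alice";
  HASH_ADD_INT(users, id, u); /* 'id' names the key field */

  int wanted = 42;
  user_t *found = NULL;
  HASH_FIND_INT(users, &wanted, found);
  if (found != NULL) printf("found %s\n", found->name);

  user_t *el, *tmp;
  HASH_ITER(hh, users, el, tmp) { /* deletion-safe iteration */
    HASH_DEL(users, el);
    free(el);
  }
  return 0;
}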