prometheus-client-mmap 0.16.2 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +22 -10
- data/ext/fast_mmaped_file/hashmap.c +467 -410
- data/ext/fast_mmaped_file/jsmn.c +244 -259
- data/ext/fast_mmaped_file/mmap.c +58 -0
- data/ext/fast_mmaped_file/mmap.h +6 -0
- data/ext/fast_mmaped_file/value_access.c +9 -6
- data/lib/fast_mmaped_file.bundle +0 -0
- data/lib/mmap.rb +7 -0
- data/lib/prometheus/#client.rb# +58 -0
- data/lib/prometheus/client/push.rb +120 -12
- data/lib/prometheus/client/version.rb +1 -1
- metadata +6 -4
--- data/ext/fast_mmaped_file/hashmap.c (0.16.2)
+++ data/ext/fast_mmaped_file/hashmap.c (0.18.0)
@@ -5,115 +5,128 @@
  * it under the terms of the MIT license. See LICENSE for details.
  */

-#include "hashmap.h"
-
-#include <errno.h>
-#include <stdbool.h>
-#include <stdint.h>
 #include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
 #include <string.h>
+#include <errno.h>
+
+#include "hashmap.h"

 #ifndef HASHMAP_NOASSERT
 #include <assert.h>
-#define HASHMAP_ASSERT(expr)
+#define HASHMAP_ASSERT(expr) assert(expr)
 #else
 #define HASHMAP_ASSERT(expr)
 #endif

 /* Table sizes must be powers of 2 */
-#define HASHMAP_SIZE_MIN
-#define HASHMAP_SIZE_DEFAULT
-#define HASHMAP_SIZE_MOD(map, val)
+#define HASHMAP_SIZE_MIN (1 << 5) /* 32 */
+#define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */
+#define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1))

 /* Limit for probing is 1/2 of table_size */
-#define HASHMAP_PROBE_LEN(map)
+#define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1)
 /* Return the next linear probe index */
-#define HASHMAP_PROBE_NEXT(map, index)
+#define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1)

 /* Check if index b is less than or equal to index a */
-#define HASHMAP_INDEX_LE(map, a, b)
+#define HASHMAP_INDEX_LE(map, a, b) \
+    ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0)
+

 struct hashmap_entry {
-
-
+    void *key;
+    void *data;
 #ifdef HASHMAP_METRICS
-
+    size_t num_collisions;
 #endif
 };

+
 /*
  * Enforce a maximum 0.75 load factor.
  */
-static inline size_t hashmap_table_min_size_calc(size_t num_entries)
+static inline size_t hashmap_table_min_size_calc(size_t num_entries)
+{
+    return num_entries + (num_entries / 3);
+}

 /*
  * Calculate the optimal table size, given the specified max number
  * of elements.
  */
-static size_t hashmap_table_size_calc(size_t num_entries)
-
-
-
-
-
-
-
-
-
-
-
+static size_t hashmap_table_size_calc(size_t num_entries)
+{
+    size_t table_size;
+    size_t min_size;
+
+    table_size = hashmap_table_min_size_calc(num_entries);
+
+    /* Table size is always a power of 2 */
+    min_size = HASHMAP_SIZE_MIN;
+    while (min_size < table_size) {
+        min_size <<= 1;
+    }
+    return min_size;
 }

 /*
  * Get a valid hash table index from a key.
  */
-static inline size_t hashmap_calc_index(const struct hashmap *map,
-
+static inline size_t hashmap_calc_index(const struct hashmap *map,
+        const void *key)
+{
+    return HASHMAP_SIZE_MOD(map, map->hash(key));
 }

 /*
  * Return the next populated entry, starting with the specified one.
  * Returns NULL if there are no more valid entries.
  */
-static struct hashmap_entry *hashmap_entry_get_populated(
-
-
-
-
-
-
+static struct hashmap_entry *hashmap_entry_get_populated(
+        const struct hashmap *map, struct hashmap_entry *entry)
+{
+    for (; entry < &map->table[map->table_size]; ++entry) {
+        if (entry->key) {
+            return entry;
+        }
+    }
+    return NULL;
 }

 /*
  * Find the hashmap entry with the specified key, or an empty slot.
  * Returns NULL if the entire table has been searched without finding a match.
  */
-static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map,
-
-
-
-
-
-
-
-
-
-
-
-
+static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map,
+        const void *key, bool find_empty)
+{
+    size_t i;
+    size_t index;
+    size_t probe_len = HASHMAP_PROBE_LEN(map);
+    struct hashmap_entry *entry;
+
+    index = hashmap_calc_index(map, key);
+
+    /* Linear probing */
+    for (i = 0; i < probe_len; ++i) {
+        entry = &map->table[index];
+        if (!entry->key) {
+            if (find_empty) {
 #ifdef HASHMAP_METRICS
-
+                entry->num_collisions = i;
 #endif
-
-
-
-
-
-
-
-
-
-
+                return entry;
+            }
+            return NULL;
+        }
+        if (map->key_compare(key, entry->key) == 0) {
+            return entry;
+        }
+        index = HASHMAP_PROBE_NEXT(map, index);
+    }
+    return NULL;
 }

 /*
@@ -121,45 +134,47 @@ static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map, const
  * the load factor and keep the chain continuous. This is a required
  * step for hash maps using linear probing.
  */
-static void hashmap_entry_remove(struct hashmap *map,
-
+static void hashmap_entry_remove(struct hashmap *map,
+        struct hashmap_entry *removed_entry)
+{
+    size_t i;
 #ifdef HASHMAP_METRICS
-
+    size_t removed_i = 0;
 #endif
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    size_t index;
+    size_t entry_index;
+    size_t removed_index = (removed_entry - map->table);
+    struct hashmap_entry *entry;
+
+    /* Free the key */
+    if (map->key_free) {
+        map->key_free(removed_entry->key);
+    }
+    --map->num_entries;
+
+    /* Fill the free slot in the chain */
+    index = HASHMAP_PROBE_NEXT(map, removed_index);
+    for (i = 1; i < map->table_size; ++i) {
+        entry = &map->table[index];
+        if (!entry->key) {
+            /* Reached end of chain */
+            break;
+        }
+        entry_index = hashmap_calc_index(map, entry->key);
+        /* Shift in entries with an index <= to the removed slot */
+        if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) {
 #ifdef HASHMAP_METRICS
-
-
+            entry->num_collisions -= (i - removed_i);
+            removed_i = i;
 #endif
-
-
-
-
-
-
-
-
+            memcpy(removed_entry, entry, sizeof(*removed_entry));
+            removed_index = index;
+            removed_entry = entry;
+        }
+        index = HASHMAP_PROBE_NEXT(map, index);
+    }
+    /* Clear the last removed entry */
+    memset(removed_entry, 0, sizeof(*removed_entry));
 }

 /*
@@ -167,64 +182,68 @@ static void hashmap_entry_remove(struct hashmap *map, struct hashmap_entry *remo
  * new_size MUST be a power of 2.
  * Returns 0 on success and -errno on allocation or hash function failure.
  */
-static int hashmap_rehash(struct hashmap *map, size_t new_size)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+static int hashmap_rehash(struct hashmap *map, size_t new_size)
+{
+    size_t old_size;
+    struct hashmap_entry *old_table;
+    struct hashmap_entry *new_table;
+    struct hashmap_entry *entry;
+    struct hashmap_entry *new_entry;
+
+    HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN);
+    HASHMAP_ASSERT((new_size & (new_size - 1)) == 0);
+
+    new_table = (struct hashmap_entry *)calloc(new_size,
+            sizeof(struct hashmap_entry));
+    if (!new_table) {
+        return -ENOMEM;
+    }
+    /* Backup old elements in case of rehash failure */
+    old_size = map->table_size;
+    old_table = map->table;
+    map->table_size = new_size;
+    map->table = new_table;
+    /* Rehash */
+    for (entry = old_table; entry < &old_table[old_size]; ++entry) {
+        if (!entry->data) {
+            /* Only copy entries with data */
+            continue;
+        }
+        new_entry = hashmap_entry_find(map, entry->key, true);
+        if (!new_entry) {
+            /*
+             * The load factor is too high with the new table
+             * size, or a poor hash function was used.
+             */
+            goto revert;
+        }
+        /* Shallow copy (intentionally omits num_collisions) */
+        new_entry->key = entry->key;
+        new_entry->data = entry->data;
+    }
+    free(old_table);
+    return 0;
 revert:
-
-
-
-
+    map->table_size = old_size;
+    map->table = old_table;
+    free(new_table);
+    return -EINVAL;
 }

 /*
  * Iterate through all entries and free all keys.
  */
-static void hashmap_free_keys(struct hashmap *map)
-
-
-
-
-
-
-
-
+static void hashmap_free_keys(struct hashmap *map)
+{
+    struct hashmap_iter *iter;
+
+    if (!map->key_free) {
+        return;
+    }
+    for (iter = hashmap_iter(map); iter;
+            iter = hashmap_iter_next(map, iter)) {
+        map->key_free((void *)hashmap_iter_get_key(iter));
+    }
 }

 /*
@@ -244,52 +263,58 @@ static void hashmap_free_keys(struct hashmap *map) {
  * Returns 0 on success and -errno on failure.
  */
 int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        int (*key_compare_func)(const void *, const void *),
+        size_t initial_size)
+{
+    HASHMAP_ASSERT(map != NULL);
+    HASHMAP_ASSERT(hash_func != NULL);
+    HASHMAP_ASSERT(key_compare_func != NULL);
+
+    if (!initial_size) {
+        initial_size = HASHMAP_SIZE_DEFAULT;
+    } else {
+        /* Convert init size to valid table size */
+        initial_size = hashmap_table_size_calc(initial_size);
+    }
+    map->table_size_init = initial_size;
+    map->table_size = initial_size;
+    map->num_entries = 0;
+    map->table = (struct hashmap_entry *)calloc(initial_size,
+            sizeof(struct hashmap_entry));
+    if (!map->table) {
+        return -ENOMEM;
+    }
+    map->hash = hash_func;
+    map->key_compare = key_compare_func;
+    map->key_alloc = NULL;
+    map->key_free = NULL;
+    return 0;
 }

 /*
  * Free the hashmap and all associated memory.
  */
-void hashmap_destroy(struct hashmap *map)
-
-
-
-
-
-
+void hashmap_destroy(struct hashmap *map)
+{
+    if (!map) {
+        return;
+    }
+    hashmap_free_keys(map);
+    free(map->table);
+    memset(map, 0, sizeof(*map));
 }

 /*
  * Enable internal memory management of hash keys.
  */
-void hashmap_set_key_alloc_funcs(struct hashmap *map,
-
-
-
-
-
+void hashmap_set_key_alloc_funcs(struct hashmap *map,
+        void *(*key_alloc_func)(const void *),
+        void (*key_free_func)(void *))
+{
+    HASHMAP_ASSERT(map != NULL);
+
+    map->key_alloc = key_alloc_func;
+    map->key_free = key_free_func;
 }

 /*
@@ -299,125 +324,132 @@ void hashmap_set_key_alloc_funcs(struct hashmap *map, void *(*key_alloc_func)(co
  * the return value with the data passed in to determine if a new entry was
  * created. Returns NULL if memory allocation failed.
  */
-void *hashmap_put(struct hashmap *map, const void *key, void *data)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+void *hashmap_put(struct hashmap *map, const void *key, void *data)
+{
+    struct hashmap_entry *entry;
+
+    HASHMAP_ASSERT(map != NULL);
+    HASHMAP_ASSERT(key != NULL);
+
+    /* Rehash with 2x capacity if load factor is approaching 0.75 */
+    if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) {
+        hashmap_rehash(map, map->table_size << 1);
+    }
+    entry = hashmap_entry_find(map, key, true);
+    if (!entry) {
+        /*
+         * Cannot find an empty slot. Either out of memory, or using
+         * a poor hash function. Attempt to rehash once to reduce
+         * chain length.
+         */
+        if (hashmap_rehash(map, map->table_size << 1) < 0) {
+            return NULL;
+        }
+        entry = hashmap_entry_find(map, key, true);
+        if (!entry) {
+            return NULL;
+        }
+    }
+    if (!entry->key) {
+        /* Allocate copy of key to simplify memory management */
+        if (map->key_alloc) {
+            entry->key = map->key_alloc(key);
+            if (!entry->key) {
+                return NULL;
+            }
+        } else {
+            entry->key = (void *)key;
+        }
+        ++map->num_entries;
+    } else if (entry->data) {
+        /* Do not overwrite existing data */
+        return entry->data;
+    }
+    entry->data = data;
+    return data;
 }

 /*
  * Return the data pointer, or NULL if no entry exists.
  */
-void *hashmap_get(const struct hashmap *map, const void *key)
-
-
-
-
-
-
-
-
-
-
+void *hashmap_get(const struct hashmap *map, const void *key)
+{
+    struct hashmap_entry *entry;
+
+    HASHMAP_ASSERT(map != NULL);
+    HASHMAP_ASSERT(key != NULL);
+
+    entry = hashmap_entry_find(map, key, false);
+    if (!entry) {
+        return NULL;
+    }
+    return entry->data;
 }

 /*
  * Remove an entry with the specified key from the map.
  * Returns the data pointer, or NULL, if no entry was found.
  */
-void *hashmap_remove(struct hashmap *map, const void *key)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+void *hashmap_remove(struct hashmap *map, const void *key)
+{
+    struct hashmap_entry *entry;
+    void *data;
+
+    HASHMAP_ASSERT(map != NULL);
+    HASHMAP_ASSERT(key != NULL);
+
+    entry = hashmap_entry_find(map, key, false);
+    if (!entry) {
+        return NULL;
+    }
+    data = entry->data;
+    /* Clear the entry and make the chain contiguous */
+    hashmap_entry_remove(map, entry);
+    return data;
 }

 /*
  * Remove all entries.
  */
-void hashmap_clear(struct hashmap *map)
-
+void hashmap_clear(struct hashmap *map)
+{
+    HASHMAP_ASSERT(map != NULL);

-
-
-
+    hashmap_free_keys(map);
+    map->num_entries = 0;
+    memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size);
 }

 /*
  * Remove all entries and reset the hash table to its initial size.
  */
-void hashmap_reset(struct hashmap *map)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+void hashmap_reset(struct hashmap *map)
+{
+    struct hashmap_entry *new_table;
+
+    HASHMAP_ASSERT(map != NULL);
+
+    hashmap_clear(map);
+    if (map->table_size == map->table_size_init) {
+        return;
+    }
+    new_table = (struct hashmap_entry *)realloc(map->table,
+            sizeof(struct hashmap_entry) * map->table_size_init);
+    if (!new_table) {
+        return;
+    }
+    map->table = new_table;
+    map->table_size = map->table_size_init;
 }

 /*
  * Return the number of entries in the hash map.
  */
-size_t hashmap_size(const struct hashmap *map)
-
+size_t hashmap_size(const struct hashmap *map)
+{
+    HASHMAP_ASSERT(map != NULL);

-
+    return map->num_entries;
 }

 /*
@@ -426,78 +458,88 @@ size_t hashmap_size(const struct hashmap *map) {
  * Hashmap iterators are INVALID after a put or remove operation is performed.
  * hashmap_iter_remove() allows safe removal during iteration.
  */
-struct hashmap_iter *hashmap_iter(const struct hashmap *map)
-
-
-
-
-
-
+struct hashmap_iter *hashmap_iter(const struct hashmap *map)
+{
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!map->num_entries) {
+        return NULL;
+    }
+    return (struct hashmap_iter *)hashmap_entry_get_populated(map,
+            map->table);
 }

 /*
  * Return an iterator to the next hashmap entry. Returns NULL if there are
  * no more entries.
  */
-struct hashmap_iter *hashmap_iter_next(const struct hashmap *map,
-
-
-
-
-
-
-
-
+struct hashmap_iter *hashmap_iter_next(const struct hashmap *map,
+        const struct hashmap_iter *iter)
+{
+    struct hashmap_entry *entry = (struct hashmap_entry *)iter;
+
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!iter) {
+        return NULL;
+    }
+    return (struct hashmap_iter *)hashmap_entry_get_populated(map,
+            entry + 1);
 }

 /*
  * Remove the hashmap entry pointed to by this iterator and return an
  * iterator to the next entry. Returns NULL if there are no more entries.
  */
-struct hashmap_iter *hashmap_iter_remove(struct hashmap *map,
-
-
-
-
-
-
-
-
-
-
-
-
-
+struct hashmap_iter *hashmap_iter_remove(struct hashmap *map,
+        const struct hashmap_iter *iter)
+{
+    struct hashmap_entry *entry = (struct hashmap_entry *)iter;
+
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!iter) {
+        return NULL;
+    }
+    if (!entry->key) {
+        /* Iterator is invalid, so just return the next valid entry */
+        return hashmap_iter_next(map, iter);
+    }
+    hashmap_entry_remove(map, entry);
+    return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry);
 }

 /*
  * Return the key of the entry pointed to by the iterator.
  */
-const void *hashmap_iter_get_key(const struct hashmap_iter *iter)
-
-
-
-
+const void *hashmap_iter_get_key(const struct hashmap_iter *iter)
+{
+    if (!iter) {
+        return NULL;
+    }
+    return (const void *)((struct hashmap_entry *)iter)->key;
 }

 /*
  * Return the data of the entry pointed to by the iterator.
  */
-void *hashmap_iter_get_data(const struct hashmap_iter *iter)
-
-
-
-
+void *hashmap_iter_get_data(const struct hashmap_iter *iter)
+{
+    if (!iter) {
+        return NULL;
+    }
+    return ((struct hashmap_entry *)iter)->data;
 }

 /*
  * Set the data pointer of the entry pointed to by the iterator.
  */
-void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data)
-
-
-
-
+void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data)
+{
+    if (!iter) {
+        return;
+    }
+    ((struct hashmap_entry *)iter)->data = data;
 }

 /*
@@ -508,38 +550,41 @@ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data) {
  * Iteration is stopped if func returns non-zero. Returns func's return
  * value if it is < 0, otherwise, 0.
  */
-int hashmap_foreach(const struct hashmap *map,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+int hashmap_foreach(const struct hashmap *map,
+        int (*func)(const void *, void *, void *), void *arg)
+{
+    struct hashmap_entry *entry;
+    size_t num_entries;
+    const void *key;
+    int rc;
+
+    HASHMAP_ASSERT(map != NULL);
+    HASHMAP_ASSERT(func != NULL);
+
+    entry = map->table;
+    for (entry = map->table; entry < &map->table[map->table_size];
+            ++entry) {
+        if (!entry->key) {
+            continue;
+        }
+        num_entries = map->num_entries;
+        key = entry->key;
+        rc = func(entry->key, entry->data, arg);
+        if (rc < 0) {
+            return rc;
+        }
+        if (rc > 0) {
+            return 0;
+        }
+        /* Run this entry again if func() deleted it */
+        if (entry->key != key) {
+            --entry;
+        } else if (num_entries != map->num_entries) {
+            /* Stop immediately if func put/removed another entry */
+            return -1;
+        }
+    }
+    return 0;
 }

 /*
@@ -547,89 +592,101 @@ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *,
  * This is an implementation of the well-documented Jenkins one-at-a-time
  * hash function.
  */
-size_t hashmap_hash_string(const void *key)
-
-
-
-
-
-
-
-
-
-
-
-
+size_t hashmap_hash_string(const void *key)
+{
+    const char *key_str = (const char *)key;
+    size_t hash = 0;
+
+    for (; *key_str; ++key_str) {
+        hash += *key_str;
+        hash += (hash << 10);
+        hash ^= (hash >> 6);
+    }
+    hash += (hash << 3);
+    hash ^= (hash >> 11);
+    hash += (hash << 15);
+    return hash;
 }

 /*
  * Default key comparator function for string keys.
  */
-int hashmap_compare_string(const void *a, const void *b)
+int hashmap_compare_string(const void *a, const void *b)
+{
+    return strcmp((const char *)a, (const char *)b);
+}

 /*
  * Default key allocation function for string keys. Use free() for the
  * key_free_func.
  */
-void *hashmap_alloc_key_string(const void *key)
+void *hashmap_alloc_key_string(const void *key)
+{
+    return (void *)strdup((const char *)key);
+}

 #ifdef HASHMAP_METRICS
 /*
  * Return the load factor.
  */
-double hashmap_load_factor(const struct hashmap *map)
-
-
-
-
-
-
+double hashmap_load_factor(const struct hashmap *map)
+{
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!map->table_size) {
+        return 0;
+    }
+    return (double)map->num_entries / map->table_size;
 }

 /*
  * Return the average number of collisions per entry.
  */
-double hashmap_collisions_mean(const struct hashmap *map)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+double hashmap_collisions_mean(const struct hashmap *map)
+{
+    struct hashmap_entry *entry;
+    size_t total_collisions = 0;
+
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!map->num_entries) {
+        return 0;
+    }
+    for (entry = map->table; entry < &map->table[map->table_size];
+            ++entry) {
+        if (!entry->key) {
+            continue;
+        }
+        total_collisions += entry->num_collisions;
+    }
+    return (double)total_collisions / map->num_entries;
 }

 /*
  * Return the variance between entry collisions. The higher the variance,
  * the more likely the hash function is poor and is resulting in clustering.
  */
-double hashmap_collisions_variance(const struct hashmap *map)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+double hashmap_collisions_variance(const struct hashmap *map)
+{
+    struct hashmap_entry *entry;
+    double mean_collisions;
+    double variance;
+    double total_variance = 0;
+
+    HASHMAP_ASSERT(map != NULL);
+
+    if (!map->num_entries) {
+        return 0;
+    }
+    mean_collisions = hashmap_collisions_mean(map);
+    for (entry = map->table; entry < &map->table[map->table_size];
+            ++entry) {
+        if (!entry->key) {
+            continue;
+        }
+        variance = (double)entry->num_collisions - mean_collisions;
+        total_variance += variance * variance;
+    }
+    return total_variance / map->num_entries;
 }
 #endif
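
For context on how the functions shown in these hunks fit together, here is a minimal usage sketch of the hashmap API: hashmap_init with the string hash/compare helpers, hashmap_set_key_alloc_funcs, hashmap_put/get/remove, iteration, and hashmap_destroy. It is illustrative only, not code shipped in the gem, and it assumes struct hashmap and struct hashmap_iter are fully declared in hashmap.h, which the .c file above relies on.

/*
 * Illustrative sketch (not part of prometheus-client-mmap): exercises only
 * the functions whose signatures appear in the hunks above.
 */
#include <stdio.h>
#include <stdlib.h>

#include "hashmap.h"

int main(void)
{
    struct hashmap map;
    struct hashmap_iter *iter;
    int hits = 42;
    int *found;

    /* Passing 0 makes hashmap_init() fall back to HASHMAP_SIZE_DEFAULT (256). */
    if (hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0) < 0) {
        return EXIT_FAILURE;
    }

    /* Let the map copy string keys on put and free them on remove/destroy. */
    hashmap_set_key_alloc_funcs(&map, hashmap_alloc_key_string, free);

    if (hashmap_put(&map, "requests_total", &hits) == NULL) {
        hashmap_destroy(&map);
        return EXIT_FAILURE;
    }

    found = hashmap_get(&map, "requests_total");
    printf("requests_total -> %d (entries: %zu)\n",
           found ? *found : -1, hashmap_size(&map));

    /* Iterators stay valid only as long as no put/remove happens mid-loop. */
    for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) {
        printf("%s -> %d\n", (const char *)hashmap_iter_get_key(iter),
               *(int *)hashmap_iter_get_data(iter));
    }

    hashmap_remove(&map, "requests_total");
    hashmap_destroy(&map);
    return EXIT_SUCCESS;
}

With the key_alloc/key_free hooks installed as above, keys passed to hashmap_put() are duplicated via hashmap_alloc_key_string() and released with free() when entries are removed, cleared, or the map is destroyed, matching the comments in the diffed source.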