prometheus-client-mmap 0.7.0.beta39 → 0.7.0.beta40

@@ -0,0 +1,23 @@
+ #ifndef FILE_PARSING_H
+ #define FILE_PARSING_H
+ #include <hashmap.h>
+ #include <file_reading.h>
+
+ typedef struct {
+     char *json;
+     size_t json_size;
+     ID multiprocess_mode;
+     ID type;
+     VALUE pid;
+     char *name;
+     size_t name_len;
+     double value;
+ } entry_struct;
+
+ void hashmap_setup(struct hashmap *map);
+
+ void entries_destroy(struct hashmap *map);
+ int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map);
+ int sort_map_entries(const struct hashmap *map, entry_struct ***sorted_entries);
+
+ #endif
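
Editor's note: the header above only declares the parsing API; its definitions are not part of this hunk. The sketch below is a hypothetical driver showing how these declarations appear to fit together. The call order, the 1-on-success return convention (borrowed from the file_reading helpers), and the teardown order (freeing the sorted array with free(), then entries_destroy(), then hashmap_destroy()) are all assumptions, not documented in this diff.

#include <stdlib.h>
#include "file_parsing.h"

/* Hypothetical driver: parse one metrics file into a sorted entry list. */
static int parse_one_file(VALUE params)
{
    struct hashmap map;
    file_t file_info;
    buffer_t source = { 0 };
    entry_struct **sorted = NULL;
    int ok = 0;

    hashmap_setup(&map);    /* assumed to configure the map's hash/compare callbacks */

    if (!file_open_from_params(&file_info, params)) {
        hashmap_destroy(&map);
        return 0;
    }

    if (read_from_file(&file_info, &source) &&
        process_buffer(&file_info, &source, &map) &&
        sort_map_entries(&map, &sorted)) {
        /* ... consume the sorted entries here ... */
        ok = 1;
    }

    free(sorted);             /* assumed: caller releases the array itself */
    entries_destroy(&map);    /* assumed: releases entries stored in the map */
    hashmap_destroy(&map);
    buffer_dispose(&source);
    file_close(&file_info);
    return ok;
}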
@@ -0,0 +1,93 @@
+ #include "file_reading.h"
+ #include <fcntl.h>
+ #include <errno.h>
+ #include <utils.h>
+
+ int file_open(file_t *file, const char *filepath){
+     file->fd = fopen(filepath, "r");
+     if (file->fd == NULL){
+         save_exception(rb_eArgError, "Can't open %s, errno: %d", filepath, errno);
+
+         return 0;
+     }
+
+     size_t filepath_len = strlen(filepath);
+     file->path = malloc(filepath_len + 1);
+     memcpy(file->path, filepath, filepath_len + 1);
+
+     if (fseek(file->fd, 0, SEEK_END) != 0) {
+         fclose(file->fd);
+         save_exception(rb_eIOError, "Can't fseek to SEEK_END, errno: %d", errno);
+
+         return 0;
+     }
+     file->length = ftell(file->fd);
+
+     // go back to the start
+     if (fseek(file->fd, 0L, SEEK_SET) != 0) {
+         fclose(file->fd);
+         save_exception(rb_eIOError, "Can't fseek to start, errno: %d", errno);
+
+         return 0;
+     }
+
+     return 1;
+ }
+
+ int file_close(file_t *file){
+     free(file->path);
+     if (fclose(file->fd) != 0) {
+         save_exception(rb_eIOError, "Can't fclose file, errno: %d", errno);
+         return 0;
+     }
+     return 1;
+ }
+
+ int file_open_from_params(file_t *file, VALUE params){
+     if (RARRAY_LEN(params) != 4){
+         save_exception(rb_eArgError, "wrong number of arguments %ld instead of 4", RARRAY_LEN(params));
+         return 0;
+     }
+
+     VALUE filepath = rb_ary_entry(params, 0);
+
+     file->multiprocess_mode = rb_sym2id(rb_ary_entry(params, 1));
+     file->type = rb_sym2id(rb_ary_entry(params, 2));
+     file->pid = rb_ary_entry(params, 3);
+
+     return file_open(file, StringValueCStr(filepath));
+ }
+
+ int read_from_file(const file_t *source, buffer_t *data){
+     data->size = 0;
+     if (data->buffer == NULL){
+         data->buffer = malloc(source->length);
+         if (data->buffer == NULL){
+             save_exception(rb_eIOError, "Can't malloc %zu, errno: %d", source->length, errno);
+             return 0;
+         }
+
+         data->capacity = source->length;
+     } else if (data->capacity < source->length){
+         char *new_buffer = realloc(data->buffer, source->length);
+         if (new_buffer == NULL){
+             save_exception(rb_eIOError, "Can't realloc %zu, errno: %d", source->length, errno);
+             return 0;
+         }
+         data->buffer = new_buffer;
+         data->capacity = source->length;
+     }
+
+     data->size = fread(data->buffer, sizeof(char), source->length, source->fd);
+
+     return 1;
+ }
+
+ void buffer_dispose(buffer_t *buffer){
+     if (buffer->buffer){
+         free(buffer->buffer);
+     }
+     buffer->buffer = NULL;
+     buffer->size = 0;
+     buffer->capacity = 0;
+ }
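
Editor's note: a minimal sketch of how the API above is meant to be driven from the C extension. file_open_from_params() expects a Ruby array of [filepath, multiprocess_mode, type, pid]; the symbol values and the numeric pid below are illustrative placeholders, not values taken from the gem.

#include <ruby.h>
#include "file_reading.h"

/* Hypothetical example: read one metrics file described by a Ruby params array. */
static void read_one_file(const char *path)
{
    file_t file;
    buffer_t data = { 0 };    /* NULL buffer: read_from_file() allocates it */

    VALUE params = rb_ary_new();
    rb_ary_push(params, rb_str_new_cstr(path));
    rb_ary_push(params, ID2SYM(rb_intern("max")));     /* multiprocess_mode (placeholder) */
    rb_ary_push(params, ID2SYM(rb_intern("gauge")));   /* type (placeholder) */
    rb_ary_push(params, INT2NUM(12345));               /* pid (placeholder) */

    if (!file_open_from_params(&file, params)) {
        return;    /* an error message was saved via save_exception() */
    }
    if (read_from_file(&file, &data)) {
        /* data.buffer now holds data.size bytes of the file's contents */
    }
    buffer_dispose(&data);
    file_close(&file);
}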
@@ -0,0 +1,30 @@
+ #ifndef FILE_READING_H
+ #define FILE_READING_H
+ #include <ruby.h>
+
+ typedef struct {
+     FILE *fd;
+     size_t length;
+     char *path;
+
+     // Information processed from file path
+     ID multiprocess_mode;
+     ID type;
+     VALUE pid;
+ } file_t;
+
+ typedef struct {
+     char *buffer;
+     size_t size;
+     size_t capacity;
+ } buffer_t;
+
+ int file_close(file_t *file);
+
+ int file_open_from_params(file_t *file, VALUE params);
+
+ int read_from_file(const file_t *source, buffer_t *data);
+
+ void buffer_dispose(buffer_t *buffer);
+
+ int read_from_file(const file_t *source, buffer_t *data);
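
Editor's note: one design point of buffer_t worth spelling out is that read_from_file() only grows the allocation (tracked in capacity) and otherwise reuses it, so a single buffer can be shared across many files to amortize allocations. A small sketch under that reading; the loop and function name are illustrative only.

#include "file_reading.h"

/* Hypothetical loop: reuse one buffer_t across several already-opened files,
 * so the backing allocation is grown at most a few times and never shrunk. */
static void read_many(file_t *files, size_t nfiles)
{
    buffer_t scratch = { 0 };
    size_t i;

    for (i = 0; i < nfiles; i++) {
        if (!read_from_file(&files[i], &scratch)) {
            break;    /* error already recorded via save_exception() */
        }
        /* scratch.buffer holds scratch.size bytes for files[i] here */
    }
    buffer_dispose(&scratch);
}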
@@ -0,0 +1,13 @@
+ #ifndef GLOBALS_H
+ #define GLOBALS_H
+ #include <ruby.h>
+
+ extern ID sym_min;
+ extern ID sym_max;
+ extern ID sym_livesum;
+ extern ID sym_gauge;
+ extern ID sym_pid;
+ extern ID sym_samples;
+ extern ID sym_key_name;
+
+ #endif
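
Editor's note: these extern IDs are Ruby symbol handles; their definitions and initialization live elsewhere in the gem and are not part of this hunk. The sketch below shows the usual pattern for such globals, defining them in a companion .c file and interning them once at extension load time. The file name, function name, and the interned strings (especially for sym_key_name) are guesses based on the variable names.

/* globals.c (hypothetical companion file, not part of this diff) */
#include "globals.h"

ID sym_min;
ID sym_max;
ID sym_livesum;
ID sym_gauge;
ID sym_pid;
ID sym_samples;
ID sym_key_name;

/* Would be called once from the extension's Init_* function. */
void init_globals(void)
{
    sym_min = rb_intern("min");
    sym_max = rb_intern("max");
    sym_livesum = rb_intern("livesum");
    sym_gauge = rb_intern("gauge");
    sym_pid = rb_intern("pid");
    sym_samples = rb_intern("samples");
    sym_key_name = rb_intern("name");    /* placeholder string */
}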
@@ -0,0 +1,692 @@
+ /*
+  * Copyright (c) 2016-2017 David Leeds <davidesleeds@gmail.com>
+  *
+  * Hashmap is free software; you can redistribute it and/or modify
+  * it under the terms of the MIT license. See LICENSE for details.
+  */
+
+ #include <stdlib.h>
+ #include <stdint.h>
+ #include <stdbool.h>
+ #include <string.h>
+ #include <errno.h>
+
+ #include "hashmap.h"
+
+ #ifndef HASHMAP_NOASSERT
+ #include <assert.h>
+ #define HASHMAP_ASSERT(expr) assert(expr)
+ #else
+ #define HASHMAP_ASSERT(expr)
+ #endif
+
+ /* Table sizes must be powers of 2 */
+ #define HASHMAP_SIZE_MIN (1 << 5) /* 32 */
+ #define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */
+ #define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1))
+
+ /* Limit for probing is 1/2 of table_size */
+ #define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1)
+ /* Return the next linear probe index */
+ #define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1)
+
+ /* Check if index b is less than or equal to index a */
+ #define HASHMAP_INDEX_LE(map, a, b) \
+     ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0)
+
+
+ struct hashmap_entry {
+     void *key;
+     void *data;
+ #ifdef HASHMAP_METRICS
+     size_t num_collisions;
+ #endif
+ };
+
+
+ /*
+  * Enforce a maximum 0.75 load factor.
+  */
+ static inline size_t hashmap_table_min_size_calc(size_t num_entries)
+ {
+     return num_entries + (num_entries / 3);
+ }
+
+ /*
+  * Calculate the optimal table size, given the specified max number
+  * of elements.
+  */
+ static size_t hashmap_table_size_calc(size_t num_entries)
+ {
+     size_t table_size;
+     size_t min_size;
+
+     table_size = hashmap_table_min_size_calc(num_entries);
+
+     /* Table size is always a power of 2 */
+     min_size = HASHMAP_SIZE_MIN;
+     while (min_size < table_size) {
+         min_size <<= 1;
+     }
+     return min_size;
+ }
+
+ /*
+  * Get a valid hash table index from a key.
+  */
+ static inline size_t hashmap_calc_index(const struct hashmap *map,
+         const void *key)
+ {
+     return HASHMAP_SIZE_MOD(map, map->hash(key));
+ }
+
+ /*
+  * Return the next populated entry, starting with the specified one.
+  * Returns NULL if there are no more valid entries.
+  */
+ static struct hashmap_entry *hashmap_entry_get_populated(
+         const struct hashmap *map, struct hashmap_entry *entry)
+ {
+     for (; entry < &map->table[map->table_size]; ++entry) {
+         if (entry->key) {
+             return entry;
+         }
+     }
+     return NULL;
+ }
+
+ /*
+  * Find the hashmap entry with the specified key, or an empty slot.
+  * Returns NULL if the entire table has been searched without finding a match.
+  */
+ static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map,
+         const void *key, bool find_empty)
+ {
+     size_t i;
+     size_t index;
+     size_t probe_len = HASHMAP_PROBE_LEN(map);
+     struct hashmap_entry *entry;
+
+     index = hashmap_calc_index(map, key);
+
+     /* Linear probing */
+     for (i = 0; i < probe_len; ++i) {
+         entry = &map->table[index];
+         if (!entry->key) {
+             if (find_empty) {
+ #ifdef HASHMAP_METRICS
+                 entry->num_collisions = i;
+ #endif
+                 return entry;
+             }
+             return NULL;
+         }
+         if (map->key_compare(key, entry->key) == 0) {
+             return entry;
+         }
+         index = HASHMAP_PROBE_NEXT(map, index);
+     }
+     return NULL;
+ }
+
+ /*
+  * Removes the specified entry and processes the subsequent entries to reduce
+  * the load factor and keep the chain continuous. This is a required
+  * step for hash maps using linear probing.
+  */
+ static void hashmap_entry_remove(struct hashmap *map,
+         struct hashmap_entry *removed_entry)
+ {
+     size_t i;
+ #ifdef HASHMAP_METRICS
+     size_t removed_i = 0;
+ #endif
+     size_t index;
+     size_t entry_index;
+     size_t removed_index = (removed_entry - map->table);
+     struct hashmap_entry *entry;
+
+     /* Free the key */
+     if (map->key_free) {
+         map->key_free(removed_entry->key);
+     }
+     --map->num_entries;
+
+     /* Fill the free slot in the chain */
+     index = HASHMAP_PROBE_NEXT(map, removed_index);
+     for (i = 1; i < map->table_size; ++i) {
+         entry = &map->table[index];
+         if (!entry->key) {
+             /* Reached end of chain */
+             break;
+         }
+         entry_index = hashmap_calc_index(map, entry->key);
+         /* Shift in entries with an index <= to the removed slot */
+         if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) {
+ #ifdef HASHMAP_METRICS
+             entry->num_collisions -= (i - removed_i);
+             removed_i = i;
+ #endif
+             memcpy(removed_entry, entry, sizeof(*removed_entry));
+             removed_index = index;
+             removed_entry = entry;
+         }
+         index = HASHMAP_PROBE_NEXT(map, index);
+     }
+     /* Clear the last removed entry */
+     memset(removed_entry, 0, sizeof(*removed_entry));
+ }
+
+ /*
+  * Reallocates the hash table to the new size and rehashes all entries.
+  * new_size MUST be a power of 2.
+  * Returns 0 on success and -errno on allocation or hash function failure.
+  */
+ static int hashmap_rehash(struct hashmap *map, size_t new_size)
+ {
+     size_t old_size;
+     struct hashmap_entry *old_table;
+     struct hashmap_entry *new_table;
+     struct hashmap_entry *entry;
+     struct hashmap_entry *new_entry;
+
+     HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN);
+     HASHMAP_ASSERT((new_size & (new_size - 1)) == 0);
+
+     new_table = (struct hashmap_entry *)calloc(new_size,
+             sizeof(struct hashmap_entry));
+     if (!new_table) {
+         return -ENOMEM;
+     }
+     /* Backup old elements in case of rehash failure */
+     old_size = map->table_size;
+     old_table = map->table;
+     map->table_size = new_size;
+     map->table = new_table;
+     /* Rehash */
+     for (entry = old_table; entry < &old_table[old_size]; ++entry) {
+         if (!entry->data) {
+             /* Only copy entries with data */
+             continue;
+         }
+         new_entry = hashmap_entry_find(map, entry->key, true);
+         if (!new_entry) {
+             /*
+              * The load factor is too high with the new table
+              * size, or a poor hash function was used.
+              */
+             goto revert;
+         }
+         /* Shallow copy (intentionally omits num_collisions) */
+         new_entry->key = entry->key;
+         new_entry->data = entry->data;
+     }
+     free(old_table);
+     return 0;
+ revert:
+     map->table_size = old_size;
+     map->table = old_table;
+     free(new_table);
+     return -EINVAL;
+ }
+
+ /*
+  * Iterate through all entries and free all keys.
+  */
+ static void hashmap_free_keys(struct hashmap *map)
+ {
+     struct hashmap_iter *iter;
+
+     if (!map->key_free) {
+         return;
+     }
+     for (iter = hashmap_iter(map); iter;
+             iter = hashmap_iter_next(map, iter)) {
+         map->key_free((void *)hashmap_iter_get_key(iter));
+     }
+ }
+
+ /*
+  * Initialize an empty hashmap. A hash function and a key comparator are
+  * required.
+  *
+  * hash_func should return an even distribution of numbers between 0
+  * and SIZE_MAX varying on the key provided.
+  *
+  * key_compare_func should return 0 if the keys match, and non-zero otherwise.
+  *
+  * initial_size is optional, and may be set to the max number of entries
+  * expected to be put in the hash table. This is used as a hint to
+  * pre-allocate the hash table to the minimum size needed to avoid
+  * gratuitous rehashes. If initial_size is 0, a default size will be used.
+  *
+  * Returns 0 on success and -errno on failure.
+  */
+ int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *),
+         int (*key_compare_func)(const void *, const void *),
+         size_t initial_size)
+ {
+     HASHMAP_ASSERT(map != NULL);
+     HASHMAP_ASSERT(hash_func != NULL);
+     HASHMAP_ASSERT(key_compare_func != NULL);
+
+     if (!initial_size) {
+         initial_size = HASHMAP_SIZE_DEFAULT;
+     } else {
+         /* Convert init size to valid table size */
+         initial_size = hashmap_table_size_calc(initial_size);
+     }
+     map->table_size_init = initial_size;
+     map->table_size = initial_size;
+     map->num_entries = 0;
+     map->table = (struct hashmap_entry *)calloc(initial_size,
+             sizeof(struct hashmap_entry));
+     if (!map->table) {
+         return -ENOMEM;
+     }
+     map->hash = hash_func;
+     map->key_compare = key_compare_func;
+     map->key_alloc = NULL;
+     map->key_free = NULL;
+     return 0;
+ }
+
+ /*
+  * Free the hashmap and all associated memory.
+  */
+ void hashmap_destroy(struct hashmap *map)
+ {
+     if (!map) {
+         return;
+     }
+     hashmap_free_keys(map);
+     free(map->table);
+     memset(map, 0, sizeof(*map));
+ }
+
+ /*
+  * Enable internal memory management of hash keys.
+  */
+ void hashmap_set_key_alloc_funcs(struct hashmap *map,
+         void *(*key_alloc_func)(const void *),
+         void (*key_free_func)(void *))
+ {
+     HASHMAP_ASSERT(map != NULL);
+
+     map->key_alloc = key_alloc_func;
+     map->key_free = key_free_func;
+ }
+
+ /*
+  * Add an entry to the hashmap. If an entry with a matching key already
+  * exists and has a data pointer associated with it, the existing data
+  * pointer is returned, instead of assigning the new value. Compare
+  * the return value with the data passed in to determine if a new entry was
+  * created. Returns NULL if memory allocation failed.
+  */
+ void *hashmap_put(struct hashmap *map, const void *key, void *data)
+ {
+     struct hashmap_entry *entry;
+
+     HASHMAP_ASSERT(map != NULL);
+     HASHMAP_ASSERT(key != NULL);
+
+     /* Rehash with 2x capacity if load factor is approaching 0.75 */
+     if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) {
+         hashmap_rehash(map, map->table_size << 1);
+     }
+     entry = hashmap_entry_find(map, key, true);
+     if (!entry) {
+         /*
+          * Cannot find an empty slot. Either out of memory, or using
+          * a poor hash function. Attempt to rehash once to reduce
+          * chain length.
+          */
+         if (hashmap_rehash(map, map->table_size << 1) < 0) {
+             return NULL;
+         }
+         entry = hashmap_entry_find(map, key, true);
+         if (!entry) {
+             return NULL;
+         }
+     }
+     if (!entry->key) {
+         /* Allocate copy of key to simplify memory management */
+         if (map->key_alloc) {
+             entry->key = map->key_alloc(key);
+             if (!entry->key) {
+                 return NULL;
+             }
+         } else {
+             entry->key = (void *)key;
+         }
+         ++map->num_entries;
+     } else if (entry->data) {
+         /* Do not overwrite existing data */
+         return entry->data;
+     }
+     entry->data = data;
+     return data;
+ }
+
+ /*
+  * Return the data pointer, or NULL if no entry exists.
+  */
+ void *hashmap_get(const struct hashmap *map, const void *key)
+ {
+     struct hashmap_entry *entry;
+
+     HASHMAP_ASSERT(map != NULL);
+     HASHMAP_ASSERT(key != NULL);
+
+     entry = hashmap_entry_find(map, key, false);
+     if (!entry) {
+         return NULL;
+     }
+     return entry->data;
+ }
+
+ /*
+  * Remove an entry with the specified key from the map.
+  * Returns the data pointer, or NULL, if no entry was found.
+  */
+ void *hashmap_remove(struct hashmap *map, const void *key)
+ {
+     struct hashmap_entry *entry;
+     void *data;
+
+     HASHMAP_ASSERT(map != NULL);
+     HASHMAP_ASSERT(key != NULL);
+
+     entry = hashmap_entry_find(map, key, false);
+     if (!entry) {
+         return NULL;
+     }
+     data = entry->data;
+     /* Clear the entry and make the chain contiguous */
+     hashmap_entry_remove(map, entry);
+     return data;
+ }
+
+ /*
+  * Remove all entries.
+  */
+ void hashmap_clear(struct hashmap *map)
+ {
+     HASHMAP_ASSERT(map != NULL);
+
+     hashmap_free_keys(map);
+     map->num_entries = 0;
+     memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size);
+ }
+
+ /*
+  * Remove all entries and reset the hash table to its initial size.
+  */
+ void hashmap_reset(struct hashmap *map)
+ {
+     struct hashmap_entry *new_table;
+
+     HASHMAP_ASSERT(map != NULL);
+
+     hashmap_clear(map);
+     if (map->table_size == map->table_size_init) {
+         return;
+     }
+     new_table = (struct hashmap_entry *)realloc(map->table,
+             sizeof(struct hashmap_entry) * map->table_size_init);
+     if (!new_table) {
+         return;
+     }
+     map->table = new_table;
+     map->table_size = map->table_size_init;
+ }
+
+ /*
+  * Return the number of entries in the hash map.
+  */
+ size_t hashmap_size(const struct hashmap *map)
+ {
+     HASHMAP_ASSERT(map != NULL);
+
+     return map->num_entries;
+ }
+
+ /*
+  * Get a new hashmap iterator. The iterator is an opaque
+  * pointer that may be used with hashmap_iter_*() functions.
+  * Hashmap iterators are INVALID after a put or remove operation is performed.
+  * hashmap_iter_remove() allows safe removal during iteration.
+  */
+ struct hashmap_iter *hashmap_iter(const struct hashmap *map)
+ {
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!map->num_entries) {
+         return NULL;
+     }
+     return (struct hashmap_iter *)hashmap_entry_get_populated(map,
+             map->table);
+ }
+
+ /*
+  * Return an iterator to the next hashmap entry. Returns NULL if there are
+  * no more entries.
+  */
+ struct hashmap_iter *hashmap_iter_next(const struct hashmap *map,
+         const struct hashmap_iter *iter)
+ {
+     struct hashmap_entry *entry = (struct hashmap_entry *)iter;
+
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!iter) {
+         return NULL;
+     }
+     return (struct hashmap_iter *)hashmap_entry_get_populated(map,
+             entry + 1);
+ }
+
+ /*
+  * Remove the hashmap entry pointed to by this iterator and return an
+  * iterator to the next entry. Returns NULL if there are no more entries.
+  */
+ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map,
+         const struct hashmap_iter *iter)
+ {
+     struct hashmap_entry *entry = (struct hashmap_entry *)iter;
+
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!iter) {
+         return NULL;
+     }
+     if (!entry->key) {
+         /* Iterator is invalid, so just return the next valid entry */
+         return hashmap_iter_next(map, iter);
+     }
+     hashmap_entry_remove(map, entry);
+     return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry);
+ }
+
+ /*
+  * Return the key of the entry pointed to by the iterator.
+  */
+ const void *hashmap_iter_get_key(const struct hashmap_iter *iter)
+ {
+     if (!iter) {
+         return NULL;
+     }
+     return (const void *)((struct hashmap_entry *)iter)->key;
+ }
+
+ /*
+  * Return the data of the entry pointed to by the iterator.
+  */
+ void *hashmap_iter_get_data(const struct hashmap_iter *iter)
+ {
+     if (!iter) {
+         return NULL;
+     }
+     return ((struct hashmap_entry *)iter)->data;
+ }
+
+ /*
+  * Set the data pointer of the entry pointed to by the iterator.
+  */
+ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data)
+ {
+     if (!iter) {
+         return;
+     }
+     ((struct hashmap_entry *)iter)->data = data;
+ }
+
+ /*
+  * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*()
+  * interface, this function supports calls to hashmap_remove() during iteration.
+  * However, it is an error to put or remove an entry other than the current one,
+  * and doing so will immediately halt iteration and return an error.
+  * Iteration is stopped if func returns non-zero. Returns func's return
+  * value if it is < 0, otherwise, 0.
+  */
+ int hashmap_foreach(const struct hashmap *map,
+         int (*func)(const void *, void *, void *), void *arg)
+ {
+     struct hashmap_entry *entry;
+     size_t num_entries;
+     const void *key;
+     int rc;
+
+     HASHMAP_ASSERT(map != NULL);
+     HASHMAP_ASSERT(func != NULL);
+
+     entry = map->table;
+     for (entry = map->table; entry < &map->table[map->table_size];
+             ++entry) {
+         if (!entry->key) {
+             continue;
+         }
+         num_entries = map->num_entries;
+         key = entry->key;
+         rc = func(entry->key, entry->data, arg);
+         if (rc < 0) {
+             return rc;
+         }
+         if (rc > 0) {
+             return 0;
+         }
+         /* Run this entry again if func() deleted it */
+         if (entry->key != key) {
+             --entry;
+         } else if (num_entries != map->num_entries) {
+             /* Stop immediately if func put/removed another entry */
+             return -1;
+         }
+     }
+     return 0;
+ }
+
+ /*
+  * Default hash function for string keys.
+  * This is an implementation of the well-documented Jenkins one-at-a-time
+  * hash function.
+  */
+ size_t hashmap_hash_string(const void *key)
+ {
+     const char *key_str = (const char *)key;
+     size_t hash = 0;
+
+     for (; *key_str; ++key_str) {
+         hash += *key_str;
+         hash += (hash << 10);
+         hash ^= (hash >> 6);
+     }
+     hash += (hash << 3);
+     hash ^= (hash >> 11);
+     hash += (hash << 15);
+     return hash;
+ }
+
+ /*
+  * Default key comparator function for string keys.
+  */
+ int hashmap_compare_string(const void *a, const void *b)
+ {
+     return strcmp((const char *)a, (const char *)b);
+ }
+
+ /*
+  * Default key allocation function for string keys. Use free() for the
+  * key_free_func.
+  */
+ void *hashmap_alloc_key_string(const void *key)
+ {
+     return (void *)strdup((const char *)key);
+ }
+
+ #ifdef HASHMAP_METRICS
+ /*
+  * Return the load factor.
+  */
+ double hashmap_load_factor(const struct hashmap *map)
+ {
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!map->table_size) {
+         return 0;
+     }
+     return (double)map->num_entries / map->table_size;
+ }
+
+ /*
+  * Return the average number of collisions per entry.
+  */
+ double hashmap_collisions_mean(const struct hashmap *map)
+ {
+     struct hashmap_entry *entry;
+     size_t total_collisions = 0;
+
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!map->num_entries) {
+         return 0;
+     }
+     for (entry = map->table; entry < &map->table[map->table_size];
+             ++entry) {
+         if (!entry->key) {
+             continue;
+         }
+         total_collisions += entry->num_collisions;
+     }
+     return (double)total_collisions / map->num_entries;
+ }
+
+ /*
+  * Return the variance between entry collisions. The higher the variance,
+  * the more likely the hash function is poor and is resulting in clustering.
+  */
+ double hashmap_collisions_variance(const struct hashmap *map)
+ {
+     struct hashmap_entry *entry;
+     double mean_collisions;
+     double variance;
+     double total_variance = 0;
+
+     HASHMAP_ASSERT(map != NULL);
+
+     if (!map->num_entries) {
+         return 0;
+     }
+     mean_collisions = hashmap_collisions_mean(map);
+     for (entry = map->table; entry < &map->table[map->table_size];
+             ++entry) {
+         if (!entry->key) {
+             continue;
+         }
+         variance = (double)entry->num_collisions - mean_collisions;
+         total_variance += variance * variance;
+     }
+     return total_variance / map->num_entries;
+ }
+ #endif
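
Editor's note: for reference, a self-contained sketch of how this hashmap API is driven with the string-key helpers defined above. It assumes hashmap.h exposes struct hashmap for caller allocation, which hashmap_init()'s signature implies; main() and the sample keys are illustrative only and are not part of the gem.

#include <stdio.h>
#include <stdlib.h>
#include "hashmap.h"

int main(void)
{
    struct hashmap map;    /* caller-allocated, initialized by hashmap_init() */
    int one = 1, two = 2;
    struct hashmap_iter *iter;

    /* String-keyed map using the default helpers defined above. */
    if (hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0) < 0) {
        return 1;
    }
    /* Let the map duplicate keys on put and free them on remove/destroy. */
    hashmap_set_key_alloc_funcs(&map, hashmap_alloc_key_string, free);

    /* hashmap_put() returns the stored data pointer; getting back a
     * different pointer means the key already existed and was kept. */
    if (hashmap_put(&map, "first", &one) != &one) {
        fprintf(stderr, "'first' was already present\n");
    }
    hashmap_put(&map, "second", &two);

    int *found = hashmap_get(&map, "second");
    if (found) {
        printf("second = %d\n", *found);
    }

    /* Iterators are invalidated by put/remove, so only read while iterating. */
    for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) {
        printf("%s -> %d\n",
                (const char *)hashmap_iter_get_key(iter),
                *(int *)hashmap_iter_get_data(iter));
    }

    hashmap_destroy(&map);
    return 0;
}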