prometheus-client-mmap 0.9.8 → 0.12.0
- checksums.yaml +4 -4
- data/ext/fast_mmaped_file/fast_mmaped_file.c +5 -8
- data/ext/fast_mmaped_file/file_parsing.c +2 -2
- data/ext/fast_mmaped_file/file_parsing.h +1 -0
- data/ext/fast_mmaped_file/file_reading.c +2 -2
- data/ext/fast_mmaped_file/hashmap.c +692 -0
- data/ext/fast_mmaped_file/jsmn.c +314 -0
- data/ext/fast_mmaped_file/mmap.c +43 -17
- data/ext/fast_mmaped_file/rendering.c +2 -1
- data/ext/fast_mmaped_file/utils.c +2 -3
- data/ext/fast_mmaped_file/utils.h +1 -0
- data/ext/fast_mmaped_file/value_access.c +2 -4
- data/ext/fast_mmaped_file/value_access.h +1 -0
- data/lib/fast_mmaped_file.bundle +0 -0
- data/lib/prometheus/client.rb +14 -2
- data/lib/prometheus/client/label_set_validator.rb +14 -2
- data/lib/prometheus/client/mmaped_dict.rb +4 -0
- data/lib/prometheus/client/version.rb +1 -1
- metadata +5 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3fec107115b6e90ebfbc109f0603bb44ca7d7165e637a2e6448ce463ab54c3c7
+  data.tar.gz: 9e9e0382069f57aebe0fb35508a1b82a8e1c61b4b228be573807241b2a434fb8
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6747ece05958e5b65a92e7305e8edb167e7ffafa84d5f0b24ccc56e8c8977add781f7c88eab1ecca117ae88bb0bbd7150273307f0c5314f33249246cef317dbf
+  data.tar.gz: 7454daa53c15a7cf57e359ebec87dda164609ad43620ebf8a72e287cac4113caba08de8ca3ba2d1efdcf1c0a05f2bdbfb7c2b524d8eb27e63444844f06469ed5
data/ext/fast_mmaped_file/fast_mmaped_file.c
CHANGED
@@ -1,20 +1,17 @@
 #include <errno.h>
+#include <hashmap.h>
+#include <jsmn.h>
 #include <ruby.h>
 #include <ruby/intern.h>
-
 #include <sys/mman.h>

-#include <hashmap.h>
-#include <jsmn.h>
-
-#include "globals.h"
-#include "utils.h"
-#include "value_access.h"
-
 #include "file_parsing.h"
 #include "file_reading.h"
+#include "globals.h"
 #include "mmap.h"
 #include "rendering.h"
+#include "utils.h"
+#include "value_access.h"

 VALUE MMAPED_FILE = Qnil;

data/ext/fast_mmaped_file/hashmap.c
ADDED (new file, 692 lines)

/*
 * Copyright (c) 2016-2017 David Leeds <davidesleeds@gmail.com>
 *
 * Hashmap is free software; you can redistribute it and/or modify
 * it under the terms of the MIT license. See LICENSE for details.
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include "hashmap.h"

#ifndef HASHMAP_NOASSERT
#include <assert.h>
#define HASHMAP_ASSERT(expr) assert(expr)
#else
#define HASHMAP_ASSERT(expr)
#endif

/* Table sizes must be powers of 2 */
#define HASHMAP_SIZE_MIN (1 << 5) /* 32 */
#define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */
#define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1))

/* Limit for probing is 1/2 of table_size */
#define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1)
/* Return the next linear probe index */
#define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1)

/* Check if index b is less than or equal to index a */
#define HASHMAP_INDEX_LE(map, a, b) \
    ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0)


struct hashmap_entry {
    void *key;
    void *data;
#ifdef HASHMAP_METRICS
    size_t num_collisions;
#endif
};


/*
 * Enforce a maximum 0.75 load factor.
 */
static inline size_t hashmap_table_min_size_calc(size_t num_entries)
{
    return num_entries + (num_entries / 3);
}

/*
 * Calculate the optimal table size, given the specified max number
 * of elements.
 */
static size_t hashmap_table_size_calc(size_t num_entries)
{
    size_t table_size;
    size_t min_size;

    table_size = hashmap_table_min_size_calc(num_entries);

    /* Table size is always a power of 2 */
    min_size = HASHMAP_SIZE_MIN;
    while (min_size < table_size) {
        min_size <<= 1;
    }
    return min_size;
}

/*
 * Get a valid hash table index from a key.
 */
static inline size_t hashmap_calc_index(const struct hashmap *map,
        const void *key)
{
    return HASHMAP_SIZE_MOD(map, map->hash(key));
}

/*
 * Return the next populated entry, starting with the specified one.
 * Returns NULL if there are no more valid entries.
 */
static struct hashmap_entry *hashmap_entry_get_populated(
        const struct hashmap *map, struct hashmap_entry *entry)
{
    for (; entry < &map->table[map->table_size]; ++entry) {
        if (entry->key) {
            return entry;
        }
    }
    return NULL;
}

/*
 * Find the hashmap entry with the specified key, or an empty slot.
 * Returns NULL if the entire table has been searched without finding a match.
 */
static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map,
        const void *key, bool find_empty)
{
    size_t i;
    size_t index;
    size_t probe_len = HASHMAP_PROBE_LEN(map);
    struct hashmap_entry *entry;

    index = hashmap_calc_index(map, key);

    /* Linear probing */
    for (i = 0; i < probe_len; ++i) {
        entry = &map->table[index];
        if (!entry->key) {
            if (find_empty) {
#ifdef HASHMAP_METRICS
                entry->num_collisions = i;
#endif
                return entry;
            }
            return NULL;
        }
        if (map->key_compare(key, entry->key) == 0) {
            return entry;
        }
        index = HASHMAP_PROBE_NEXT(map, index);
    }
    return NULL;
}

/*
 * Removes the specified entry and processes the proceeding entries to reduce
 * the load factor and keep the chain continuous. This is a required
 * step for hash maps using linear probing.
 */
static void hashmap_entry_remove(struct hashmap *map,
        struct hashmap_entry *removed_entry)
{
    size_t i;
#ifdef HASHMAP_METRICS
    size_t removed_i = 0;
#endif
    size_t index;
    size_t entry_index;
    size_t removed_index = (removed_entry - map->table);
    struct hashmap_entry *entry;

    /* Free the key */
    if (map->key_free) {
        map->key_free(removed_entry->key);
    }
    --map->num_entries;

    /* Fill the free slot in the chain */
    index = HASHMAP_PROBE_NEXT(map, removed_index);
    for (i = 1; i < map->table_size; ++i) {
        entry = &map->table[index];
        if (!entry->key) {
            /* Reached end of chain */
            break;
        }
        entry_index = hashmap_calc_index(map, entry->key);
        /* Shift in entries with an index <= to the removed slot */
        if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) {
#ifdef HASHMAP_METRICS
            entry->num_collisions -= (i - removed_i);
            removed_i = i;
#endif
            memcpy(removed_entry, entry, sizeof(*removed_entry));
            removed_index = index;
            removed_entry = entry;
        }
        index = HASHMAP_PROBE_NEXT(map, index);
    }
    /* Clear the last removed entry */
    memset(removed_entry, 0, sizeof(*removed_entry));
}

/*
 * Reallocates the hash table to the new size and rehashes all entries.
 * new_size MUST be a power of 2.
 * Returns 0 on success and -errno on allocation or hash function failure.
 */
static int hashmap_rehash(struct hashmap *map, size_t new_size)
{
    size_t old_size;
    struct hashmap_entry *old_table;
    struct hashmap_entry *new_table;
    struct hashmap_entry *entry;
    struct hashmap_entry *new_entry;

    HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN);
    HASHMAP_ASSERT((new_size & (new_size - 1)) == 0);

    new_table = (struct hashmap_entry *)calloc(new_size,
            sizeof(struct hashmap_entry));
    if (!new_table) {
        return -ENOMEM;
    }
    /* Backup old elements in case of rehash failure */
    old_size = map->table_size;
    old_table = map->table;
    map->table_size = new_size;
    map->table = new_table;
    /* Rehash */
    for (entry = old_table; entry < &old_table[old_size]; ++entry) {
        if (!entry->data) {
            /* Only copy entries with data */
            continue;
        }
        new_entry = hashmap_entry_find(map, entry->key, true);
        if (!new_entry) {
            /*
             * The load factor is too high with the new table
             * size, or a poor hash function was used.
             */
            goto revert;
        }
        /* Shallow copy (intentionally omits num_collisions) */
        new_entry->key = entry->key;
        new_entry->data = entry->data;
    }
    free(old_table);
    return 0;
revert:
    map->table_size = old_size;
    map->table = old_table;
    free(new_table);
    return -EINVAL;
}

/*
 * Iterate through all entries and free all keys.
 */
static void hashmap_free_keys(struct hashmap *map)
{
    struct hashmap_iter *iter;

    if (!map->key_free) {
        return;
    }
    for (iter = hashmap_iter(map); iter;
            iter = hashmap_iter_next(map, iter)) {
        map->key_free((void *)hashmap_iter_get_key(iter));
    }
}

/*
 * Initialize an empty hashmap. A hash function and a key comparator are
 * required.
 *
 * hash_func should return an even distribution of numbers between 0
 * and SIZE_MAX varying on the key provided.
 *
 * key_compare_func should return 0 if the keys match, and non-zero otherwise.
 *
 * initial_size is optional, and may be set to the max number of entries
 * expected to be put in the hash table. This is used as a hint to
 * pre-allocate the hash table to the minimum size needed to avoid
 * gratuitous rehashes. If initial_size 0, a default size will be used.
 *
 * Returns 0 on success and -errno on failure.
 */
int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *),
        int (*key_compare_func)(const void *, const void *),
        size_t initial_size)
{
    HASHMAP_ASSERT(map != NULL);
    HASHMAP_ASSERT(hash_func != NULL);
    HASHMAP_ASSERT(key_compare_func != NULL);

    if (!initial_size) {
        initial_size = HASHMAP_SIZE_DEFAULT;
    } else {
        /* Convert init size to valid table size */
        initial_size = hashmap_table_size_calc(initial_size);
    }
    map->table_size_init = initial_size;
    map->table_size = initial_size;
    map->num_entries = 0;
    map->table = (struct hashmap_entry *)calloc(initial_size,
            sizeof(struct hashmap_entry));
    if (!map->table) {
        return -ENOMEM;
    }
    map->hash = hash_func;
    map->key_compare = key_compare_func;
    map->key_alloc = NULL;
    map->key_free = NULL;
    return 0;
}

/*
 * Free the hashmap and all associated memory.
 */
void hashmap_destroy(struct hashmap *map)
{
    if (!map) {
        return;
    }
    hashmap_free_keys(map);
    free(map->table);
    memset(map, 0, sizeof(*map));
}

/*
 * Enable internal memory management of hash keys.
 */
void hashmap_set_key_alloc_funcs(struct hashmap *map,
        void *(*key_alloc_func)(const void *),
        void (*key_free_func)(void *))
{
    HASHMAP_ASSERT(map != NULL);

    map->key_alloc = key_alloc_func;
    map->key_free = key_free_func;
}

/*
 * Add an entry to the hashmap. If an entry with a matching key already
 * exists and has a data pointer associated with it, the existing data
 * pointer is returned, instead of assigning the new value. Compare
 * the return value with the data passed in to determine if a new entry was
 * created. Returns NULL if memory allocation failed.
 */
void *hashmap_put(struct hashmap *map, const void *key, void *data)
{
    struct hashmap_entry *entry;

    HASHMAP_ASSERT(map != NULL);
    HASHMAP_ASSERT(key != NULL);

    /* Rehash with 2x capacity if load factor is approaching 0.75 */
    if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) {
        hashmap_rehash(map, map->table_size << 1);
    }
    entry = hashmap_entry_find(map, key, true);
    if (!entry) {
        /*
         * Cannot find an empty slot. Either out of memory, or using
         * a poor hash function. Attempt to rehash once to reduce
         * chain length.
         */
        if (hashmap_rehash(map, map->table_size << 1) < 0) {
            return NULL;
        }
        entry = hashmap_entry_find(map, key, true);
        if (!entry) {
            return NULL;
        }
    }
    if (!entry->key) {
        /* Allocate copy of key to simplify memory management */
        if (map->key_alloc) {
            entry->key = map->key_alloc(key);
            if (!entry->key) {
                return NULL;
            }
        } else {
            entry->key = (void *)key;
        }
        ++map->num_entries;
    } else if (entry->data) {
        /* Do not overwrite existing data */
        return entry->data;
    }
    entry->data = data;
    return data;
}

/*
 * Return the data pointer, or NULL if no entry exists.
 */
void *hashmap_get(const struct hashmap *map, const void *key)
{
    struct hashmap_entry *entry;

    HASHMAP_ASSERT(map != NULL);
    HASHMAP_ASSERT(key != NULL);

    entry = hashmap_entry_find(map, key, false);
    if (!entry) {
        return NULL;
    }
    return entry->data;
}

/*
 * Remove an entry with the specified key from the map.
 * Returns the data pointer, or NULL, if no entry was found.
 */
void *hashmap_remove(struct hashmap *map, const void *key)
{
    struct hashmap_entry *entry;
    void *data;

    HASHMAP_ASSERT(map != NULL);
    HASHMAP_ASSERT(key != NULL);

    entry = hashmap_entry_find(map, key, false);
    if (!entry) {
        return NULL;
    }
    data = entry->data;
    /* Clear the entry and make the chain contiguous */
    hashmap_entry_remove(map, entry);
    return data;
}

/*
 * Remove all entries.
 */
void hashmap_clear(struct hashmap *map)
{
    HASHMAP_ASSERT(map != NULL);

    hashmap_free_keys(map);
    map->num_entries = 0;
    memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size);
}

/*
 * Remove all entries and reset the hash table to its initial size.
 */
void hashmap_reset(struct hashmap *map)
{
    struct hashmap_entry *new_table;

    HASHMAP_ASSERT(map != NULL);

    hashmap_clear(map);
    if (map->table_size == map->table_size_init) {
        return;
    }
    new_table = (struct hashmap_entry *)realloc(map->table,
            sizeof(struct hashmap_entry) * map->table_size_init);
    if (!new_table) {
        return;
    }
    map->table = new_table;
    map->table_size = map->table_size_init;
}

/*
 * Return the number of entries in the hash map.
 */
size_t hashmap_size(const struct hashmap *map)
{
    HASHMAP_ASSERT(map != NULL);

    return map->num_entries;
}

/*
 * Get a new hashmap iterator. The iterator is an opaque
 * pointer that may be used with hashmap_iter_*() functions.
 * Hashmap iterators are INVALID after a put or remove operation is performed.
 * hashmap_iter_remove() allows safe removal during iteration.
 */
struct hashmap_iter *hashmap_iter(const struct hashmap *map)
{
    HASHMAP_ASSERT(map != NULL);

    if (!map->num_entries) {
        return NULL;
    }
    return (struct hashmap_iter *)hashmap_entry_get_populated(map,
            map->table);
}

/*
 * Return an iterator to the next hashmap entry. Returns NULL if there are
 * no more entries.
 */
struct hashmap_iter *hashmap_iter_next(const struct hashmap *map,
        const struct hashmap_iter *iter)
{
    struct hashmap_entry *entry = (struct hashmap_entry *)iter;

    HASHMAP_ASSERT(map != NULL);

    if (!iter) {
        return NULL;
    }
    return (struct hashmap_iter *)hashmap_entry_get_populated(map,
            entry + 1);
}

/*
 * Remove the hashmap entry pointed to by this iterator and return an
 * iterator to the next entry. Returns NULL if there are no more entries.
 */
struct hashmap_iter *hashmap_iter_remove(struct hashmap *map,
        const struct hashmap_iter *iter)
{
    struct hashmap_entry *entry = (struct hashmap_entry *)iter;

    HASHMAP_ASSERT(map != NULL);

    if (!iter) {
        return NULL;
    }
    if (!entry->key) {
        /* Iterator is invalid, so just return the next valid entry */
        return hashmap_iter_next(map, iter);
    }
    hashmap_entry_remove(map, entry);
    return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry);
}

/*
 * Return the key of the entry pointed to by the iterator.
 */
const void *hashmap_iter_get_key(const struct hashmap_iter *iter)
{
    if (!iter) {
        return NULL;
    }
    return (const void *)((struct hashmap_entry *)iter)->key;
}

/*
 * Return the data of the entry pointed to by the iterator.
 */
void *hashmap_iter_get_data(const struct hashmap_iter *iter)
{
    if (!iter) {
        return NULL;
    }
    return ((struct hashmap_entry *)iter)->data;
}

/*
 * Set the data pointer of the entry pointed to by the iterator.
 */
void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data)
{
    if (!iter) {
        return;
    }
    ((struct hashmap_entry *)iter)->data = data;
}

/*
 * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*()
 * interface, this function supports calls to hashmap_remove() during iteration.
 * However, it is an error to put or remove an entry other than the current one,
 * and doing so will immediately halt iteration and return an error.
 * Iteration is stopped if func returns non-zero. Returns func's return
 * value if it is < 0, otherwise, 0.
 */
int hashmap_foreach(const struct hashmap *map,
        int (*func)(const void *, void *, void *), void *arg)
{
    struct hashmap_entry *entry;
    size_t num_entries;
    const void *key;
    int rc;

    HASHMAP_ASSERT(map != NULL);
    HASHMAP_ASSERT(func != NULL);

    entry = map->table;
    for (entry = map->table; entry < &map->table[map->table_size];
            ++entry) {
        if (!entry->key) {
            continue;
        }
        num_entries = map->num_entries;
        key = entry->key;
        rc = func(entry->key, entry->data, arg);
        if (rc < 0) {
            return rc;
        }
        if (rc > 0) {
            return 0;
        }
        /* Run this entry again if func() deleted it */
        if (entry->key != key) {
            --entry;
        } else if (num_entries != map->num_entries) {
            /* Stop immediately if func put/removed another entry */
            return -1;
        }
    }
    return 0;
}

/*
 * Default hash function for string keys.
 * This is an implementation of the well-documented Jenkins one-at-a-time
 * hash function.
 */
size_t hashmap_hash_string(const void *key)
{
    const char *key_str = (const char *)key;
    size_t hash = 0;

    for (; *key_str; ++key_str) {
        hash += *key_str;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash;
}

/*
 * Default key comparator function for string keys.
 */
int hashmap_compare_string(const void *a, const void *b)
{
    return strcmp((const char *)a, (const char *)b);
}

/*
 * Default key allocation function for string keys. Use free() for the
 * key_free_func.
 */
void *hashmap_alloc_key_string(const void *key)
{
    return (void *)strdup((const char *)key);
}

#ifdef HASHMAP_METRICS
/*
 * Return the load factor.
 */
double hashmap_load_factor(const struct hashmap *map)
{
    HASHMAP_ASSERT(map != NULL);

    if (!map->table_size) {
        return 0;
    }
    return (double)map->num_entries / map->table_size;
}

/*
 * Return the average number of collisions per entry.
 */
double hashmap_collisions_mean(const struct hashmap *map)
{
    struct hashmap_entry *entry;
    size_t total_collisions = 0;

    HASHMAP_ASSERT(map != NULL);

    if (!map->num_entries) {
        return 0;
    }
    for (entry = map->table; entry < &map->table[map->table_size];
            ++entry) {
        if (!entry->key) {
            continue;
        }
        total_collisions += entry->num_collisions;
    }
    return (double)total_collisions / map->num_entries;
}

/*
 * Return the variance between entry collisions. The higher the variance,
 * the more likely the hash function is poor and is resulting in clustering.
 */
double hashmap_collisions_variance(const struct hashmap *map)
{
    struct hashmap_entry *entry;
    double mean_collisions;
    double variance;
    double total_variance = 0;

    HASHMAP_ASSERT(map != NULL);

    if (!map->num_entries) {
        return 0;
    }
    mean_collisions = hashmap_collisions_mean(map);
    for (entry = map->table; entry < &map->table[map->table_size];
            ++entry) {
        if (!entry->key) {
            continue;
        }
        variance = (double)entry->num_collisions - mean_collisions;
        total_variance += variance * variance;
    }
    return total_variance / map->num_entries;
}
#endif
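The vendored map above is keyed by caller-supplied hash and compare callbacks, uses linear probing, and doubles its table once the load factor nears 0.75. A minimal sketch of how a caller might drive the API with string keys, assuming the struct and prototypes declared in hashmap.h match the definitions above:

#include <stdio.h>
#include <stdlib.h>
#include "hashmap.h"

int main(void) {
    struct hashmap map;
    int value = 42;

    /* String keys: Jenkins one-at-a-time hash + strcmp comparator. */
    if (hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0) < 0) {
        return EXIT_FAILURE;
    }
    /* Let the map strdup()/free() its own copies of the keys. */
    hashmap_set_key_alloc_funcs(&map, hashmap_alloc_key_string, free);

    /* hashmap_put() returns the existing data pointer if the key is already taken. */
    if (hashmap_put(&map, "requests_total", &value) != &value) {
        fprintf(stderr, "key already present or allocation failed\n");
    }

    int *found = hashmap_get(&map, "requests_total");
    if (found) {
        printf("requests_total -> %d\n", *found);
    }

    /* Walk every populated entry. */
    struct hashmap_iter *iter;
    for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) {
        printf("key: %s\n", (const char *)hashmap_iter_get_key(iter));
    }

    hashmap_destroy(&map);
    return 0;
}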
data/ext/fast_mmaped_file/jsmn.c
ADDED (new file, 314 lines)

#include "jsmn.h"

/**
 * Allocates a fresh unused token from the token pull.
 */
static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser,
        jsmntok_t *tokens, size_t num_tokens) {
    jsmntok_t *tok;
    if (parser->toknext >= num_tokens) {
        return NULL;
    }
    tok = &tokens[parser->toknext++];
    tok->start = tok->end = -1;
    tok->size = 0;
#ifdef JSMN_PARENT_LINKS
    tok->parent = -1;
#endif
    return tok;
}

/**
 * Fills token type and boundaries.
 */
static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type,
        int start, int end) {
    token->type = type;
    token->start = start;
    token->end = end;
    token->size = 0;
}

/**
 * Fills next available token with JSON primitive.
 */
static int jsmn_parse_primitive(jsmn_parser *parser, const char *js,
        size_t len, jsmntok_t *tokens, size_t num_tokens) {
    jsmntok_t *token;
    int start;

    start = parser->pos;

    for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
        switch (js[parser->pos]) {
#ifndef JSMN_STRICT
            /* In strict mode primitive must be followed by "," or "}" or "]" */
            case ':':
#endif
            case '\t' : case '\r' : case '\n' : case ' ' :
            case ','  : case ']'  : case '}' :
                goto found;
        }
        if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
            parser->pos = start;
            return JSMN_ERROR_INVAL;
        }
    }
#ifdef JSMN_STRICT
    /* In strict mode primitive must be followed by a comma/object/array */
    parser->pos = start;
    return JSMN_ERROR_PART;
#endif

found:
    if (tokens == NULL) {
        parser->pos--;
        return 0;
    }
    token = jsmn_alloc_token(parser, tokens, num_tokens);
    if (token == NULL) {
        parser->pos = start;
        return JSMN_ERROR_NOMEM;
    }
    jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
#ifdef JSMN_PARENT_LINKS
    token->parent = parser->toksuper;
#endif
    parser->pos--;
    return 0;
}

/**
 * Fills next token with JSON string.
 */
static int jsmn_parse_string(jsmn_parser *parser, const char *js,
        size_t len, jsmntok_t *tokens, size_t num_tokens) {
    jsmntok_t *token;

    int start = parser->pos;

    parser->pos++;

    /* Skip starting quote */
    for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
        char c = js[parser->pos];

        /* Quote: end of string */
        if (c == '\"') {
            if (tokens == NULL) {
                return 0;
            }
            token = jsmn_alloc_token(parser, tokens, num_tokens);
            if (token == NULL) {
                parser->pos = start;
                return JSMN_ERROR_NOMEM;
            }
            jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos);
#ifdef JSMN_PARENT_LINKS
            token->parent = parser->toksuper;
#endif
            return 0;
        }

        /* Backslash: Quoted symbol expected */
        if (c == '\\' && parser->pos + 1 < len) {
            int i;
            parser->pos++;
            switch (js[parser->pos]) {
                /* Allowed escaped symbols */
                case '\"': case '/' : case '\\' : case 'b' :
                case 'f' : case 'r' : case 'n'  : case 't' :
                    break;
                /* Allows escaped symbol \uXXXX */
                case 'u':
                    parser->pos++;
                    for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) {
                        /* If it isn't a hex character we have an error */
                        if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */
                                    (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */
                                    (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */
                            parser->pos = start;
                            return JSMN_ERROR_INVAL;
                        }
                        parser->pos++;
                    }
                    parser->pos--;
                    break;
                /* Unexpected symbol */
                default:
                    parser->pos = start;
                    return JSMN_ERROR_INVAL;
            }
        }
    }
    parser->pos = start;
    return JSMN_ERROR_PART;
}

/**
 * Parse JSON string and fill tokens.
 */
int jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
        jsmntok_t *tokens, unsigned int num_tokens) {
    int r;
    int i;
    jsmntok_t *token;
    int count = parser->toknext;

    for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
        char c;
        jsmntype_t type;

        c = js[parser->pos];
        switch (c) {
            case '{': case '[':
                count++;
                if (tokens == NULL) {
                    break;
                }
                token = jsmn_alloc_token(parser, tokens, num_tokens);
                if (token == NULL)
                    return JSMN_ERROR_NOMEM;
                if (parser->toksuper != -1) {
                    tokens[parser->toksuper].size++;
#ifdef JSMN_PARENT_LINKS
                    token->parent = parser->toksuper;
#endif
                }
                token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
                token->start = parser->pos;
                parser->toksuper = parser->toknext - 1;
                break;
            case '}': case ']':
                if (tokens == NULL)
                    break;
                type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
#ifdef JSMN_PARENT_LINKS
                if (parser->toknext < 1) {
                    return JSMN_ERROR_INVAL;
                }
                token = &tokens[parser->toknext - 1];
                for (;;) {
                    if (token->start != -1 && token->end == -1) {
                        if (token->type != type) {
                            return JSMN_ERROR_INVAL;
                        }
                        token->end = parser->pos + 1;
                        parser->toksuper = token->parent;
                        break;
                    }
                    if (token->parent == -1) {
                        if(token->type != type || parser->toksuper == -1) {
                            return JSMN_ERROR_INVAL;
                        }
                        break;
                    }
                    token = &tokens[token->parent];
                }
#else
                for (i = parser->toknext - 1; i >= 0; i--) {
                    token = &tokens[i];
                    if (token->start != -1 && token->end == -1) {
                        if (token->type != type) {
                            return JSMN_ERROR_INVAL;
                        }
                        parser->toksuper = -1;
                        token->end = parser->pos + 1;
                        break;
                    }
                }
                /* Error if unmatched closing bracket */
                if (i == -1) return JSMN_ERROR_INVAL;
                for (; i >= 0; i--) {
                    token = &tokens[i];
                    if (token->start != -1 && token->end == -1) {
                        parser->toksuper = i;
                        break;
                    }
                }
#endif
                break;
            case '\"':
                r = jsmn_parse_string(parser, js, len, tokens, num_tokens);
                if (r < 0) return r;
                count++;
                if (parser->toksuper != -1 && tokens != NULL)
                    tokens[parser->toksuper].size++;
                break;
            case '\t' : case '\r' : case '\n' : case ' ':
                break;
            case ':':
                parser->toksuper = parser->toknext - 1;
                break;
            case ',':
                if (tokens != NULL && parser->toksuper != -1 &&
                        tokens[parser->toksuper].type != JSMN_ARRAY &&
                        tokens[parser->toksuper].type != JSMN_OBJECT) {
#ifdef JSMN_PARENT_LINKS
                    parser->toksuper = tokens[parser->toksuper].parent;
#else
                    for (i = parser->toknext - 1; i >= 0; i--) {
                        if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) {
                            if (tokens[i].start != -1 && tokens[i].end == -1) {
                                parser->toksuper = i;
                                break;
                            }
                        }
                    }
#endif
                }
                break;
#ifdef JSMN_STRICT
            /* In strict mode primitives are: numbers and booleans */
            case '-': case '0': case '1' : case '2': case '3' : case '4':
            case '5': case '6': case '7' : case '8': case '9':
            case 't': case 'f': case 'n' :
                /* And they must not be keys of the object */
                if (tokens != NULL && parser->toksuper != -1) {
                    jsmntok_t *t = &tokens[parser->toksuper];
                    if (t->type == JSMN_OBJECT ||
                            (t->type == JSMN_STRING && t->size != 0)) {
                        return JSMN_ERROR_INVAL;
                    }
                }
#else
            /* In non-strict mode every unquoted value is a primitive */
            default:
#endif
                r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens);
                if (r < 0) return r;
                count++;
                if (parser->toksuper != -1 && tokens != NULL)
                    tokens[parser->toksuper].size++;
                break;

#ifdef JSMN_STRICT
            /* Unexpected char in strict mode */
            default:
                return JSMN_ERROR_INVAL;
#endif
        }
    }

    if (tokens != NULL) {
        for (i = parser->toknext - 1; i >= 0; i--) {
            /* Unmatched opened object or array */
            if (tokens[i].start != -1 && tokens[i].end == -1) {
                return JSMN_ERROR_PART;
            }
        }
    }

    return count;
}

/**
 * Creates a new parser based over a given buffer with an array of tokens
 * available.
 */
void jsmn_init(jsmn_parser *parser) {
    parser->pos = 0;
    parser->toknext = 0;
    parser->toksuper = -1;
}
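jsmn does not build a document tree; jsmn_parse() fills a caller-provided jsmntok_t array with (type, start, end, size) records that index back into the original buffer. A small sketch of the calling convention, assuming the jsmntok_t/jsmn_parser declarations that ship in jsmn.h alongside this file:

#include <stdio.h>
#include <string.h>
#include "jsmn.h"

int main(void) {
    const char *js = "{\"metric\":\"requests_total\",\"value\":7}";
    jsmn_parser parser;
    jsmntok_t tokens[16];

    jsmn_init(&parser);
    /* Returns the number of tokens used, or a negative JSMN_ERROR_* code. */
    int count = jsmn_parse(&parser, js, strlen(js), tokens, 16);
    if (count < 0) {
        fprintf(stderr, "jsmn_parse failed: %d\n", count);
        return 1;
    }

    /* Each token records byte offsets into the original buffer. */
    for (int i = 0; i < count; i++) {
        printf("token %d: type=%d text=%.*s\n", i, tokens[i].type,
               tokens[i].end - tokens[i].start, js + tokens[i].start);
    }
    return 0;
}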
data/ext/fast_mmaped_file/mmap.c
CHANGED
@@ -1,11 +1,11 @@
+#include "mmap.h"
+
 #include <errno.h>
 #include <fcntl.h>
-#include <ruby.h>
 #include <ruby/util.h>
 #include <sys/mman.h>

 #include "file_format.h"
-#include "mmap.h"
 #include "utils.h"

 #if 0
@@ -42,7 +42,6 @@ static VALUE mm_str(VALUE obj, int modify) {
     GET_MMAP(obj, i_mm, modify & ~MM_ORIGIN);
     if (modify & MM_MODIFY) {
         if (i_mm->t->flag & MM_FROZEN) rb_error_frozen("mmap");
-        if (!OBJ_TAINTED(ret) && rb_safe_level() >= 4) rb_raise(rb_eSecurityError, "Insecure: can't modify mmap");
     }
     ret = rb_obj_alloc(rb_cString);
     if (rb_obj_tainted(obj)) {
@@ -170,6 +169,35 @@ VALUE mm_s_alloc(VALUE obj) {
     return res;
 }

+size_t next_page_boundary(size_t value) {
+    size_t page_size = sysconf(_SC_PAGESIZE);
+
+    while (page_size < value) {
+        page_size *= 2;
+    }
+
+    return page_size;
+}
+
+/* Reference implementations:
+ * mozilla: https://hg.mozilla.org/mozilla-central/file/3d846420a907/xpcom/glue/FileUtils.cpp#l71
+ * glibc: https://github.com/lattera/glibc/blob/master/sysdeps/posix/posix_fallocate.c
+ */
+int reserve_mmap_file_bytes(int fd, size_t size) {
+#if __linux__
+    /* From https://stackoverflow.com/a/22820221: The difference with
+     * ftruncate(2) is that (on file systems supporting it, e.g. Ext4)
+     * disk space is indeed reserved by posix_fallocate but ftruncate
+     * extends the file by adding holes (and without reserving disk
+     * space). */
+    return posix_fallocate(fd, 0, size);
+#else
+    /* We simplify the reference implemnetations since we generally
+     * don't need to reserve more than a page size. */
+    return ftruncate(fd, size);
+#endif
+}
+
 VALUE mm_init(VALUE obj, VALUE fname) {
     struct stat st;
     int fd, smode = 0, pmode = 0, vscope, perm, init;
@@ -187,13 +215,6 @@ VALUE mm_init(VALUE obj, VALUE fname) {
     SafeStringValue(fname);
     path = StringValuePtr(fname);

-    {
-        if (rb_safe_level() > 0 && OBJ_TAINTED(fname)) {
-            rb_raise(rb_eSecurityError, "Insecure operation");
-        }
-        rb_secure(1);
-    }
-
     vscope = MAP_SHARED;
     size = 0;
     perm = 0666;
@@ -206,6 +227,7 @@
     }

     if (fstat(fd, &st) == -1) {
+        close(fd);
         rb_raise(rb_eArgError, "Can't stat %s", path);
     }
     size = st.st_size;
@@ -215,17 +237,21 @@
     offset = 0;
     init = 0;

-    if (size == 0
-        if (lseek(fd, INITIAL_SIZE - 1, SEEK_END) == -1) {
-            rb_raise(rb_eIOError, "Can't lseek %zu", INITIAL_SIZE - 1);
-        }
-        if (write(fd, "\000", 1) != 1) {
-            rb_raise(rb_eIOError, "Can't extend %s", path);
-        }
+    if (size == 0) {
         init = 1;
         size = INITIAL_SIZE;
     }

+    /* We need to ensure the underlying file descriptor is at least a page size.
+     * Otherwise, we could get a SIGBUS error if mmap() attempts to read or write
+     * past the file. */
+    size_t reserve_size = next_page_boundary(size);
+
+    if (reserve_mmap_file_bytes(fd, reserve_size) != 0) {
+        close(fd);
+        rb_raise(rb_eIOError, "Can't reserve %zu bytes for memory-mapped file in %s", reserve_size, path);
+    }
+
     addr = mmap(0, size, pmode, vscope, fd, offset);

     if (addr == MAP_FAILED || !addr) {
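The new mm_init() path above rounds the requested size up with next_page_boundary() and reserves that many bytes in the backing file before mapping, so mmap() never touches an offset the file does not cover (which would raise SIGBUS). A standalone sketch of the same reserve-then-map pattern outside the Ruby extension; the metrics.db path and the 100-byte payload are only illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Same rounding rule as next_page_boundary() above: double the page size
 * until it covers the requested value. */
static size_t round_up_to_page(size_t value) {
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    while (page_size < value) {
        page_size *= 2;
    }
    return page_size;
}

int main(void) {
    int fd = open("metrics.db", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    size_t reserve = round_up_to_page(100);

    /* Reserve real blocks on Linux; fall back to ftruncate() elsewhere. */
#ifdef __linux__
    int rc = posix_fallocate(fd, 0, (off_t)reserve);
#else
    int rc = ftruncate(fd, (off_t)reserve);
#endif
    if (rc != 0) {
        fprintf(stderr, "reserve failed: %d\n", rc);
        close(fd);
        return 1;
    }

    /* Mapping exactly the reserved length keeps every access file-backed. */
    void *addr = mmap(NULL, reserve, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (addr == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }

    munmap(addr, reserve);
    close(fd);
    return 0;
}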
data/ext/fast_mmaped_file/value_access.c
CHANGED
@@ -1,15 +1,13 @@
-#include
-#include <ruby/intern.h>
+#include "value_access.h"

 #include <errno.h>
 #include <fcntl.h>
+#include <ruby/intern.h>
 #include <sys/mman.h>
 #include <unistd.h>

 #include "file_format.h"
 #include "mmap.h"
-#include "value_access.h"
-
 #include "utils.h"

 static void close_file(mm_ipc *i_mm) {

data/lib/fast_mmaped_file.bundle
CHANGED
Binary file
data/lib/prometheus/client.rb
CHANGED
@@ -29,13 +29,25 @@ module Prometheus
       configuration.pid_provider.call
     end

+    # Resets the registry and reinitializes all metrics files.
+    # Use case: clean up everything in specs `before` block,
+    # to prevent leaking the state between specs which are updating metrics.
     def reset!
       @registry = nil
       ::Prometheus::Client::MmapedValue.reset_and_reinitialize
     end

-
-
+    # With `force: false`: reinitializes metric files only for processes with the changed PID.
+    # With `force: true`: reinitializes all metrics files.
+    # Always keeps the registry.
+    # Use case (`force: false`): pick up new metric files on each worker start,
+    # without resetting already registered files for the master or previously initialized workers.
+    def reinitialize_on_pid_change(force: false)
+      if force
+        ::Prometheus::Client::MmapedValue.reset_and_reinitialize
+      else
+        ::Prometheus::Client::MmapedValue.reinitialize_on_pid_change
+      end
     end
   end
 end
data/lib/prometheus/client/label_set_validator.rb
CHANGED
@@ -19,14 +19,15 @@ module Prometheus
       end

       def valid?(labels)
-        unless labels.
+        unless labels.is_a?(Hash)
           raise InvalidLabelSetError, "#{labels} is not a valid label set"
         end

-        labels.all? do |key,
+        labels.all? do |key, value|
           validate_symbol(key)
           validate_name(key)
           validate_reserved_key(key)
+          validate_value(key, value)
         end
       end

@@ -65,6 +66,17 @@ module Prometheus

         raise ReservedLabelError, "#{key} is reserved"
       end
+
+      def validate_value(key, value)
+        return true if value.is_a?(String) ||
+          value.is_a?(Numeric) ||
+          value.is_a?(Symbol) ||
+          value.is_a?(FalseClass) ||
+          value.is_a?(TrueClass) ||
+          value.nil?
+
+        raise InvalidLabelError, "#{key} does not contain a valid value (type #{value.class})"
+      end
     end
   end
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: prometheus-client-mmap
 version: !ruby/object:Gem::Version
-  version: 0.9.8
+  version: 0.12.0
 platform: ruby
 authors:
 - Tobias Schmidt
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-09-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fuzzbert
@@ -106,6 +106,8 @@ files:
 - ext/fast_mmaped_file/file_reading.c
 - ext/fast_mmaped_file/file_reading.h
 - ext/fast_mmaped_file/globals.h
+- ext/fast_mmaped_file/hashmap.c
+- ext/fast_mmaped_file/jsmn.c
 - ext/fast_mmaped_file/mmap.c
 - ext/fast_mmaped_file/mmap.h
 - ext/fast_mmaped_file/rendering.c
@@ -114,6 +116,7 @@ files:
 - ext/fast_mmaped_file/utils.h
 - ext/fast_mmaped_file/value_access.c
 - ext/fast_mmaped_file/value_access.h
+- lib/fast_mmaped_file.bundle
 - lib/prometheus.rb
 - lib/prometheus/client.rb
 - lib/prometheus/client/configuration.rb