prometheus-client-mmap 0.9.2 → 0.9.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/prometheus/client/helper/entry_parser.rb +1 -1
- data/lib/prometheus/client/version.rb +1 -1
- metadata +2 -7
- data/ext/fast_mmaped_file/#file_parsing.c# +0 -216
- data/ext/fast_mmaped_file/#value_access.c# +0 -203
- data/lib/prometheus/client/#histogram.rb# +0 -79
- data/lib/prometheus/client/#mmaped_dict.rb# +0 -108
- data/lib/prometheus/client/helper/#entry_parser.rb# +0 -132
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5e154df3b59ab07dafccd92bdcb586be4e47d55d
+  data.tar.gz: 3aae7936fe1cc4ed0a3b9012a161e669547a2c0f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2933cecd564d961fc08fb733f62bfd74b647ec4797f37ee9d72df9d9563373c5f639fa53687f8d99152a81bc0cf76c4b3a17b29941ba7fab25aad3df8cb5bc71
+  data.tar.gz: 38f8d56e76388d059961af5a31bbe421243c2d4ffd18d2ba25e6102ba8a3c5c359b4dd948269f230420713325334762b27cb876e0ac861fada331e56e7f3cc49
data/lib/prometheus/client/helper/entry_parser.rb
CHANGED
@@ -66,7 +66,7 @@ module Prometheus
               value_offset = entry_len + padding_len # align to 8 bytes
               pos += value_offset

-              if value_offset > 0 && (pos + VALUE_BYTES) < size # if positions are safe
+              if value_offset > 0 && (pos + VALUE_BYTES) <= size # if positions are safe
                 yielder.yield data, encoded_len, value_offset, pos
               else
                 raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
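This one-character change is the whole code fix in the release: with the strict `<` (visible in the removed `#entry_parser.rb#` backup near the end of this diff), an entry whose 8-byte value ended exactly at the end of the mapped file was rejected as out of bounds and raised `ParsingError`. A minimal sketch of the arithmetic, with illustrative numbers rather than values from a real metrics file:

    VALUE_BYTES = 8
    size = 32   # total bytes in the mapped file
    pos  = 24   # start of the 8-byte double for the last entry

    # 0.9.2: 32 < 32 => false, so a value occupying bytes 24..31
    # of a 32-byte file was wrongly treated as unsafe.
    (pos + VALUE_BYTES) < size   # => false

    # 0.9.3: 32 <= 32 => true, an entry ending exactly at the
    # end of the file is accepted.
    (pos + VALUE_BYTES) <= size  # => true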
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: prometheus-client-mmap
 version: !ruby/object:Gem::Version
-  version: 0.9.2
+  version: 0.9.3
 platform: ruby
 authors:
 - Tobias Schmidt
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-05-
+date: 2018-05-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fuzzbert
@@ -83,8 +83,6 @@ extensions:
 extra_rdoc_files: []
 files:
 - README.md
-- ext/fast_mmaped_file/#file_parsing.c#
-- ext/fast_mmaped_file/#value_access.c#
 - ext/fast_mmaped_file/extconf.rb
 - ext/fast_mmaped_file/fast_mmaped_file.c
 - ext/fast_mmaped_file/file_format.c
@@ -104,13 +102,10 @@ files:
 - ext/fast_mmaped_file/value_access.h
 - lib/prometheus.rb
 - lib/prometheus/client.rb
-- lib/prometheus/client/#histogram.rb#
-- lib/prometheus/client/#mmaped_dict.rb#
 - lib/prometheus/client/configuration.rb
 - lib/prometheus/client/counter.rb
 - lib/prometheus/client/formats/text.rb
 - lib/prometheus/client/gauge.rb
-- lib/prometheus/client/helper/#entry_parser.rb#
 - lib/prometheus/client/helper/entry_parser.rb
 - lib/prometheus/client/helper/file_locker.rb
 - lib/prometheus/client/helper/json_parser.rb
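The five removed `files:` entries wrapped in `#…#` match the Emacs auto-save naming pattern: editor scratch copies that were accidentally packaged into 0.9.2 and are simply dropped from the gem in 0.9.3. A quick local check, assuming both archives have been fetched with `gem fetch prometheus-client-mmap -v 0.9.2` and `-v 0.9.3`:

    require 'rubygems/package'

    old_files = Gem::Package.new('prometheus-client-mmap-0.9.2.gem').spec.files
    new_files = Gem::Package.new('prometheus-client-mmap-0.9.3.gem').spec.files

    # Expected output: the five #...# auto-save files listed above.
    puts old_files - new_files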
data/ext/fast_mmaped_file/#file_parsing.c#
DELETED
@@ -1,216 +0,0 @@
#include <hashmap.h>
#include <jsmn.h>
#include <ruby.h>

#include "file_format.h"
#include "file_parsing.h"
#include "globals.h"
#include "utils.h"

HASHMAP_FUNCS_CREATE(entry, const entry_t, entry_t)

typedef int (*compare_fn)(const void *a, const void *b);

static size_t hashmap_hash_entry(const entry_t *entry) {
    size_t hash = 0;

    for (size_t i = 0; i < entry->json_size; i++) {
        hash += *(const char *)(entry->json + i);
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash;
}

static int hashmap_compare_entry(const entry_t *a, const entry_t *b) {
    if (a->json_size != b->json_size) {
        return -1;
    }

    if (is_pid_significant(a) && (rb_str_equal(a->pid, b->pid) == Qfalse)) {
        return -1;
    }

    return strncmp(a->json, b->json, a->json_size);
}

static entry_t *copy_entry(const entry_t *entry) {
    entry_t *copied = (entry_t *)malloc(sizeof(entry_t));
    if (copied == NULL) {
        return NULL;
    }
    memcpy(copied, entry, sizeof(entry_t));

    copied->json = malloc(entry->json_size);
    if (copied->json == NULL) {
        free(copied);
        return NULL;
    }

    memcpy(copied->json, entry->json, entry->json_size);

    return copied;
}

static void entry_free(entry_t *entry) {
    free(entry->json);
    free(entry);
}

static void merge_entry(entry_t *found, const entry_t *entry) {
    if (entry->type == sym_gauge) {
        if (entry->multiprocess_mode == sym_min) {
            found->value = min(found->value, entry->value);
        } else if (entry->multiprocess_mode == sym_max) {
            found->value = max(found->value, entry->value);
        } else if (entry->multiprocess_mode == sym_livesum) {
            found->value += entry->value;
        } else {
            found->value = entry->value;
        }
    } else {
        found->value += entry->value;
    }
}

static int process_entry(struct hashmap *map, const entry_t *entry) {
    entry_t *found = entry_hashmap_get(map, entry);
    if (found) {
        merge_entry(found, entry);
    } else {
        entry_t *copy = copy_entry(entry);
        if (copy == NULL) {
            save_exception(rb_eNoMemError, "Failed copying metrics entry");
            return 0;
        }
        entry_hashmap_put(map, copy, copy); // use the hashmap like hashset actually
    }
    return 1;
}

inline entry_t entry_new(buffer_t *source, uint32_t pos, uint32_t encoded_len, file_t *file_info) {
    entry_t entry = (entry_t){.json = source->buffer + pos,
                              .json_size = encoded_len,
                              .pid = file_info->pid,
                              .multiprocess_mode = file_info->multiprocess_mode,
                              .type = file_info->type,
                              .name_len = 0};

    uint32_t value_offset = encoded_len + padding_length(encoded_len);
    memcpy(&entry.value, entry.json + value_offset, sizeof(double));

    return entry;
}

static int add_parsed_name(entry_t *entry) {
    jsmn_parser parser;
    jsmn_init(&parser);

    jsmntok_t t[2] = {(jsmntok_t){}, (jsmntok_t){}};
    jsmn_parse(&parser, entry->json, entry->json_size, t, 2);

    jsmntok_t *name_tok = &t[1];

    if (name_tok->start < name_tok->end && name_tok->start > 0) {
        entry->name = entry->json + name_tok->start;
        entry->name_len = name_tok->end - name_tok->start;
        return 1;
    }
    return 0;
}

static int entry_lexical_comparator(const entry_t **a, const entry_t **b) {
    size_t min_length = min((*a)->json_size, (*b)->json_size);
    return strncmp((*a)->json, (*b)->json, min_length);
}

void hashmap_setup(struct hashmap *map) {
    hashmap_init(map, (size_t(*)(const void *))hashmap_hash_entry,
                 (int (*)(const void *, const void *))hashmap_compare_entry, 1000);
}

void entries_destroy(struct hashmap *map) {
    struct hashmap_iter *iter;
    for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) {
        entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter);
        entry_free(entry);
    }
}

int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map) {
    if (source->size < START_POSITION) {
        // nothing to read
        return 1;
    }
    uint32_t used;
    memcpy(&used, source->buffer, sizeof(uint32_t));

    if (used > source->size) {
        save_exception(prom_eParsingError, "source file %s corrupted, used %u > file size %u", file_info->path, used,
                       source->size);
        return 0;
    }

    uint32_t pos = START_POSITION;
    while (pos + sizeof(uint32_t) < used) {
        uint32_t encoded_len;
        memcpy(&encoded_len, source->buffer + pos, sizeof(uint32_t));
        pos += sizeof(uint32_t);

        uint32_t value_offset = encoded_len + padding_length(encoded_len);

        if (pos + value_offset + sizeof(double) > used) {
            save_exception(prom_eParsingError, "source file %s corrupted, used %u < stored data length %u",
                           file_info->path, used, pos + value_offset + sizeof(double));
            return 0;
        }
        entry_t entry = entry_new(source, pos, encoded_len, file_info);

        if (!process_entry(map, &entry)) {
            entries_destroy(map);
            return 0;
        }

        pos += value_offset + sizeof(double);
    }
    return 1;
}

int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries) {
    size_t num = hashmap_size(map);

    size_t list_size = num * sizeof(entry_t *);
    entry_t **list = malloc(list_size);

    if (list == NULL) {
        save_exception(rb_eNoMemError, "Couldn't allocate %zu memory", list_size);
        return 0;
    }

    size_t cnt = 0;
    struct hashmap_iter *iter;
    for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) {
        entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter);
        if (add_parsed_name(entry)) {
            list[cnt] = entry;
            cnt++;
        }
    }
    if (cnt != num) {
        save_exception(rb_eRuntimeError, "Processed entries %zu != map entries %zu", cnt, num);
        free(list);
        return 0;
    }

    qsort(list, cnt, sizeof(entry_t *), (compare_fn)&entry_lexical_comparator);
    *sorted_entries = list;
    return 1;
}

int is_pid_significant(const entry_t *e) {
    ID mp = e->multiprocess_mode;
    return e->type == sym_gauge && !(mp == sym_min || mp == sym_max || mp == sym_livesum);
}
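The heart of this (now removed) backup copy is `merge_entry`: counters, histograms, and summaries always sum across processes, while gauges honor the file's multiprocess mode, and `is_pid_significant` keeps per-pid gauge entries distinct in the default mode. A rough Ruby rendering of those rules (a hypothetical helper for illustration, not part of the gem's API):

    # Hypothetical Ruby equivalent of merge_entry's aggregation rules.
    def merge_value(found, incoming, type:, multiprocess_mode: nil)
      # Non-gauge metrics (counters, histograms, summaries) always sum.
      return found + incoming unless type == :gauge

      case multiprocess_mode
      when :min     then [found, incoming].min
      when :max     then [found, incoming].max
      when :livesum then found + incoming
      # Default ("all"): entries are keyed per pid (is_pid_significant),
      # so a merge only happens within one process and the newer value wins.
      else incoming
      end
    end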
data/ext/fast_mmaped_file/#value_access.c#
DELETED
@@ -1,203 +0,0 @@
#include <ruby.h>
#include <ruby/intern.h>

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "file_format.h"
#include "mmap.h"
#include "value_access.h"

static int open_and_extend_file(mm_ipc *i_mm, size_t len) {
    int fd;

    if ((fd = open(i_mm->t->path, i_mm->t->smode)) == -1) {
        rb_raise(rb_eArgError, "Can't open %s", i_mm->t->path);
    }

    if (lseek(fd, len - i_mm->t->len - 1, SEEK_END) == -1) {
        close(fd);
        rb_raise(rb_eIOError, "Can't lseek %zu", len - i_mm->t->len - 1);
    }

    if (write(fd, "\000", 1) != 1) {
        close(fd);
        rb_raise(rb_eIOError, "Can't extend %s", i_mm->t->path);
    }

    return fd;
}

static void expand(mm_ipc *i_mm, size_t len) {
    if (len < i_mm->t->len) {
        rb_raise(rb_eArgError, "Can't reduce the size of mmap");
    }

    if (munmap(i_mm->t->addr, i_mm->t->len)) {
        rb_raise(rb_eArgError, "munmap failed");
    }

    int fd = open_and_extend_file(i_mm, len);

    i_mm->t->addr = mmap(0, len, i_mm->t->pmode, i_mm->t->vscope, fd, i_mm->t->offset);

    if (i_mm->t->addr == MAP_FAILED) {
        close(fd);
        rb_raise(rb_eArgError, "mmap failed");
    }

    if (close(fd) == -1) {
        rb_raise(rb_eArgError, "Can't close %s", i_mm->t->path);
    }

    if ((i_mm->t->flag & MM_LOCK) && mlock(i_mm->t->addr, len) == -1) {
        rb_raise(rb_eArgError, "mlock(%d)", errno);
    }
    i_mm->t->len = len;
    i_mm->t->real = len;
}

static void save_entry(mm_ipc *i_mm, size_t offset, VALUE key, VALUE value) {
    uint32_t key_length = (uint32_t)RSTRING_LEN(key);

    char *pos = (char *)i_mm->t->addr + offset;

    memcpy(pos, &key_length, sizeof(uint32_t));
    pos += sizeof(uint32_t);

    memmove(pos, StringValuePtr(key), key_length);
    pos += key_length;

    memset(pos, ' ', padding_length(key_length)); // TODO: considder padding with /0
    pos += padding_length(key_length);

    double val = NUM2DBL(value);
    memcpy(pos, &val, sizeof(double));
}

static void save_value(mm_ipc *i_mm, VALUE _offset, VALUE value) {
    Check_Type(_offset, T_FIXNUM);
    size_t offset = NUM2UINT(_offset);
    if ((i_mm->t->real + sizeof(double)) <= offset) {
        rb_raise(rb_eIndexError, "offset %zu out of string", offset);
    }

    if (i_mm->t->flag & MM_FROZEN) {
        rb_error_frozen("mmap");
    }

    char *pos = (char *)i_mm->t->addr + offset;

    double val = NUM2DBL(value);
    memcpy(pos, &val, sizeof(double));
}

static VALUE load_value(mm_ipc *i_mm, VALUE _offset) {
    Check_Type(_offset, T_FIXNUM);
    size_t offset = NUM2UINT(_offset);
    if ((i_mm->t->real + sizeof(double)) <= offset) {
        rb_raise(rb_eIndexError, "offset %zu out of string", offset);
    }

    char *pos = (char *)i_mm->t->addr + offset;

    double value;
    memcpy(&value, pos, sizeof(double));
    return DBL2NUM(value);
}

inline uint32_t load_used(mm_ipc *i_mm) {
    uint32_t used = *((uint32_t *)i_mm->t->addr);

    if (used == 0) {
        used = START_POSITION;
    }
    return used;
}

inline void save_used(mm_ipc *i_mm, uint32_t used) { *((uint32_t *)i_mm->t->addr) = used; }

static VALUE initialize_entry(mm_ipc *i_mm, VALUE positions, VALUE key, VALUE value) {
    if (i_mm->t->flag & MM_FROZEN) {
        rb_error_frozen("mmap");
    }

    if (RSTRING_LEN(key) > INT32_MAX) {
        rb_raise(rb_eArgError, "string length gt %d", INT32_MAX);
    }

    uint32_t key_length = (uint32_t)RSTRING_LEN(key);
    uint32_t value_offset = sizeof(uint32_t) + key_length + padding_length(key_length);
    uint32_t entry_length = value_offset + sizeof(double);

    uint32_t used = load_used(i_mm);
    while (i_mm->t->len < (used + entry_length)) {
        expand(i_mm, i_mm->t->len * 2);
    }
    save_entry(i_mm, used, key, value);
    save_used(i_mm, used + entry_length);

    return rb_hash_aset(positions, key, INT2NUM(used + value_offset));
}

VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value) {
    Check_Type(positions, T_HASH);
    Check_Type(key, T_STRING);

    mm_ipc *i_mm;
    GET_MMAP(self, i_mm, MM_MODIFY);

    VALUE position = rb_hash_lookup(positions, key);

    if (position != Qnil) {
        return load_value(i_mm, position);
    }

    position = initialize_entry(i_mm, positions, key, default_value);
    return load_value(i_mm, position);
}

VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value) {
    Check_Type(positions, T_HASH);
    Check_Type(key, T_STRING);

    mm_ipc *i_mm;
    GET_MMAP(self, i_mm, MM_MODIFY);

    VALUE position = rb_hash_lookup(positions, key);

    if (position != Qnil) {
        save_value(i_mm, position, value);
        return load_value(i_mm, position);
    }

    position = initialize_entry(i_mm, positions, key, value);
    return load_value(i_mm, position);
}

VALUE method_load_used(VALUE self) {
    mm_ipc *i_mm;

    GET_MMAP(self, i_mm, MM_MODIFY);
    return UINT2NUM(load_used(i_mm));
}

VALUE method_save_used(VALUE self, VALUE value) {
    Check_Type(value, T_FIXNUM);
    mm_ipc *i_mm;

    GET_MMAP(self, i_mm, MM_MODIFY);

    if (i_mm->t->flag & MM_FROZEN) {
        rb_error_frozen("mmap");
    }

    if (i_mm->t->len < INITIAL_SIZE) {
        expand(i_mm, INITIAL_SIZE);
    }

    save_used(i_mm, NUM2UINT(value));
    return value;
}
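`save_entry` above fixes the on-disk layout that every other component parses: a 4-byte key length, the key bytes, space padding out to an 8-byte boundary, then the 8-byte double. A Ruby sketch of the same layout, assuming `padding_length` pads the 4-byte length field plus key to a multiple of 8 (as the Ruby parser later in this diff computes it):

    # Illustrative packer for one entry; mirrors save_entry's layout.
    def pack_entry(key, value)
      padding = 8 - (4 + key.bytesize) % 8          # pad to an 8-byte boundary
      [key.bytesize, key, ' ' * padding, value]
        .pack("L<a#{key.bytesize}a#{padding}d")     # len | key | spaces | double
    end

    entry = pack_entry('{"name":"x"}', 1.5)
    entry.bytesize % 8  # => 0, so the next entry's value stays 8-byte aligned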
data/lib/prometheus/client/#histogram.rb#
DELETED
@@ -1,79 +0,0 @@
require 'prometheus/client/metric'
require 'prometheus/client/uses_value_type'

module Prometheus
  module Client
    # A histogram samples observations (usually things like request durations
    # or response sizes) and counts them in configurable buckets. It also
    # provides a sum of all observed values.
    class Histogram < Metric
      # Value represents the state of a Histogram at a given point.
      class Value < Hash
        include UsesValueType
        attr_accessor :sum, :total

        def initialize(type, name, labels, buckets)
          @sum = value_object(type, name, "#{name}_sum", labels)
          # TODO: get rid of total and use +Inf bucket instead.
          @total = value_object(type, name, "#{name}_count", labels)

          buckets.each do |bucket|
            self[bucket] = value_object(type, name, "#{name}_bucket", labels.merge({ :le => bucket.to_s }))
          end
        end

        def observe(value)
          @sum.increment(value)
          @total.increment()

          each_key do |bucket|
            self[bucket].increment() if value <= bucket
          end
        end

        def get()
          hash = {}
          each_key do |bucket|
            hash[bucket] = self[bucket].get()
          end
          hash
        end
      end

      # DEFAULT_BUCKETS are the default Histogram buckets. The default buckets
      # are tailored to broadly measure the response time (in seconds) of a
      # network service. (From DefBuckets client_golang)
      DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1,
                         2.5, 5, 10].freeze

      # Offer a way to manually specify buckets
      def initialize(name, docstring, base_labels = {},
                     buckets = DEFAULT_BUCKETS)
        raise ArgumentError, 'Unsorted buckets, typo?' unless sorted? buckets

        @buckets = buckets
        super(name, docstring, base_labels)
      end

      def type
        :histogram
      end

      def observe(labels, value)
        label_set = label_set_for(labels)
        synchronize { @values[label_set].observe(value) }
      end

      private

      def default(labels)
        # TODO: default function needs to know key of hash info (label names and values)
        Value.new(type, @name, labels, @buckets)
      end

      def sorted?(bucket)
        bucket.each_cons(2).all? { |i, j| i <= j }
      end
    end
  end
end
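This is another editor backup; the canonical lib/prometheus/client/histogram.rb still ships in the gem. For orientation, hypothetical usage of the class as defined above (bucket boundaries come from `DEFAULT_BUCKETS`; the `get(labels)` reader is assumed from the gem's base `Metric` class, not shown in this diff):

    require 'prometheus/client'

    histogram = Prometheus::Client::Histogram.new(:request_duration_seconds,
                                                  'Time spent serving requests')

    histogram.observe({ method: 'GET' }, 0.3)  # increments every bucket >= 0.3

    # Value#get returns a bucket => count hash for one label set.
    histogram.get(method: 'GET')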
data/lib/prometheus/client/#mmaped_dict.rb#
DELETED
@@ -1,108 +0,0 @@
require 'prometheus/client/helper/mmaped_file'
require 'prometheus/client'

module Prometheus
  module Client
    class ParsingError < StandardError
    end

    # A dict of doubles, backed by an mmapped file.
    #
    # The file starts with a 4 byte int, indicating how much of it is used.
    # Then 4 bytes of padding.
    # There's then a number of entries, consisting of a 4 byte int which is the
    # size of the next field, a utf-8 encoded string key, padding to an 8 byte
    # alignment, and then a 8 byte float which is the value.
    #
    # For example, for :
    #
    # 0: | PID (4 bytes) | Used bytes (4 bytes) |
    # 1: | Padding (4 bytes) | Size of field (4 bytes) |
    # 2:
    # 2: | Entry 1 key (N bytes) | Padding (4 bytes) |
    # 3: | Entry 1 value (8 bytes) |
    # 4: | Size of field 2 | Padding (4 bytes) |
    # 5: | Entry 2 key (4 bytes) | Padding (4 byte) |
    # 6: | Entry 2 value (8 bytes) |
    class MmapedDict
      MINIMUM_SIZE = 8
      attr_reader :m, :used, :positions

      def initialize(m)
        @mutex = Mutex.new

        @m = m
        # @m.mlock # TODO: Ensure memory is locked to RAM

        @used = @m.used

        @positions = {}
        read_all_positions.each do |key, pos|
          @positions[key] = pos
        end
      rescue StandardError => e
        raise ParsingError, "exception #{e} while processing metrics file #{path}"
      end

      # Yield (key, value). No locking is performed.
      def self.read_all_values(f)
        m = Helper::MmapedFile.open(f)

        m.entries.map do |data, encoded_len, value_offset, _|
          encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
          [encoded, value]
        end
      ensure
        m.munmap
      end

      def read_value(key)
        @mutex.synchronize do
          init_value(key) unless @positions.key?(key)
        end
        pos = @positions[key]
        # We assume that reading from an 8 byte aligned value is atomic.
        @m[pos..pos + 7].unpack('d')[0]
      end

      def write_value(key, value)
        @mutex.synchronize do
          init_value(key) unless @positions.key?(key)
        end
        pos = @positions[key]
        # We assume that writing to an 8 byte aligned value is atomic.
        @m[pos..pos + 7] = [value].pack('d')
      end

      def path
        @m.filepath unless @m.nil?
      end

      def close
        @m.close
      rescue TypeError => e
        Prometheus::Client.logger.warn("munmap raised error #{e}")
      end

      private

      # Initialize a value. Lock must be held by caller.
      def init_value(key)
        @m.add_entry(key, 0.0)

        # Update how much space we've used.
        @used = @m.used

        @positions[key] = @used - 8
      end

      # Yield (key, pos). No locking is performed.
      def read_all_positions
        @m.entries.map do |data, encoded_len, _, absolute_pos|
          encoded, = data.unpack(format('@4A%d', encoded_len))
          [encoded, absolute_pos]
        end
      end
    end
  end
end
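The `'@4A%d@%dd'` unpack format in `read_all_values` is dense; here is how it walks one entry, using a hand-built string in the layout described by the class comment (illustrative values, not a real metrics file):

    key  = '{"name":"x"}'  # 12 bytes
    data = [key.bytesize, key].pack('L<a12') + ' ' * 8 + [42.0].pack('d')

    # @4   skip the 4-byte length prefix
    # A12  read the 12-byte key (A trims trailing spaces)
    # @24  jump to the value offset: 4 (length) + 12 (key) + 8 (padding)
    # d    read the 8-byte double
    data.unpack(format('@4A%d@%dd', 12, 24))  # => ["{\"name\":\"x\"}", 42.0]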
data/lib/prometheus/client/helper/#entry_parser.rb#
DELETED
@@ -1,132 +0,0 @@
require 'prometheus/client/helper/json_parser'

module Prometheus
  module Client
    module Helper
      module EntryParser
        class ParsingError < RuntimeError;
        end

        MINIMUM_SIZE = 8
        START_POSITION = 8
        VALUE_BYTES = 8
        ENCODED_LENGTH_BYTES = 4

        def used
          slice(0..3).unpack('l')[0]
        end

        def parts
          @parts ||= File.basename(filepath, '.db')
                         .split('_')
                         .map { |e| e.gsub(/-\d+$/, '') } # remove trailing -number
        end

        def type
          parts[0].to_sym
        end

        def pid
          parts[2..-1].join('_')
        end

        def multiprocess_mode
          parts[1]
        end

        def empty?
          size < MINIMUM_SIZE || used.zero?
        end

        def entries(ignore_errors = false)
          return Enumerator.new {} if empty?

          Enumerator.new do |yielder|
            used_ = used # cache used to avoid unnecessary unpack operations

            pos = START_POSITION # used + padding offset
            while pos < used_ && pos < size && pos > 0
              data = slice(pos..-1)
              unless data
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
                pos += 8
                next
              end

              encoded_len, first_encoded_bytes = data.unpack('LL')
              if encoded_len.nil? || encoded_len.zero? || first_encoded_bytes.nil? || first_encoded_bytes.zero?
                # do not parse empty data
                pos += 8
                next
              end

              entry_len = ENCODED_LENGTH_BYTES + encoded_len
              padding_len = 8 - entry_len % 8

              value_offset = entry_len + padding_len # align to 8 bytes
              pos += value_offset

              if value_offset > 0 && (pos + VALUE_BYTES) < size # if positions are safe
                yielder.yield data, encoded_len, value_offset, pos
              else
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
              end
              pos += VALUE_BYTES
            end
          end
        end

        def parsed_entries(ignore_errors = false)
          result = entries(ignore_errors).map do |data, encoded_len, value_offset, _|
            begin
              encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
              [encoded, value]
            rescue ArgumentError => e
              Prometheus::Client.logger.debug("Error processing data: #{bin_to_hex(data[0, 7])} len: #{encoded_len} value_offset: #{value_offset}")
              raise ParsingError, e unless ignore_errors
            end
          end

          metrics.reject!(&:nil?) if ignore_errors
          result
        end

        def to_metrics(metrics = {}, ignore_errors = false)
          parsed_entries(ignore_errors).each do |key, value|
            begin
              metric_name, name, labelnames, labelvalues = JsonParser.load(key)
              labelnames ||= []
              labelvalues ||= []

              metric = metrics.fetch(metric_name,
                                     metric_name: metric_name,
                                     help: 'Multiprocess metric',
                                     type: type,
                                     samples: [])
              if type == :gauge
                metric[:multiprocess_mode] = multiprocess_mode
                metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]]
              else
                # The duplicates and labels are fixed in the next for.
                metric[:samples] += [[name, labelnames.zip(labelvalues), value]]
              end
              metrics[metric_name] = metric

            rescue JSON::ParserError => e
              raise ParsingError(e) unless ignore_errors
            end
          end

          metrics.reject!(&:nil?) if ignore_errors
          metrics
        end

        private

        def bin_to_hex(s)
          s.each_byte.map { |b| b.to_s(16) }.join
        end
      end
    end
  end
end
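One last orientation note on this backup copy: the `parts`, `type`, `pid`, and `multiprocess_mode` helpers at the top of the module decode a metrics file's name into metric type, multiprocess mode, and pid. A worked example with an illustrative file name:

    name  = 'gauge_livesum_puma_2-0.db'   # hypothetical metrics file
    parts = File.basename(name, '.db')
                .split('_')
                .map { |e| e.gsub(/-\d+$/, '') }  # strip the trailing -number

    parts                   # => ["gauge", "livesum", "puma", "2"]
    parts[0].to_sym         # type              => :gauge
    parts[1]                # multiprocess_mode => "livesum"
    parts[2..-1].join('_')  # pid               => "puma_2"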