prometheus-client-mmap 0.9.1 → 0.9.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/ext/fast_mmaped_file/#file_parsing.c# +216 -0
- data/ext/fast_mmaped_file/#value_access.c# +203 -0
- data/lib/prometheus/client.rb +5 -0
- data/lib/prometheus/client/#histogram.rb# +79 -0
- data/lib/prometheus/client/#mmaped_dict.rb# +108 -0
- data/lib/prometheus/client/helper/#entry_parser.rb# +132 -0
- data/lib/prometheus/client/version.rb +1 -1
- metadata +8 -7
- data/ext/fast_mmaped_file/hashmap.c +0 -692
- data/ext/fast_mmaped_file/jsmn.c +0 -314
- data/lib/fast_mmaped_file.bundle +0 -0
- data/vendor/c/jsmn/jsmn.o +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: d8752d354419c2916e63d9d02e2891093725d6d0
|
4
|
+
data.tar.gz: 2abff88dd92d618f310ede7213e061b74922c164
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 7e1f15bf7e89250e0b0fdff424a4e1fc1e06f35d333b0cb633d2b40bb53bd8d1b7cc94b8181d578369000593fe59319cb7db28f345654d0a5eee40314fd09811
|
7
|
+
data.tar.gz: 9464dd49c6fd0d273ff1790c2524798dd54b5c2adc4ed911bd5901e2c5a7ce213e78a13d7dd024cac6a1afef3af46151c8903281d77112ec42877a58d36c01c5
|
@@ -0,0 +1,216 @@
|
|
1
|
+
#include <hashmap.h>
|
2
|
+
#include <jsmn.h>
|
3
|
+
#include <ruby.h>
|
4
|
+
|
5
|
+
#include "file_format.h"
|
6
|
+
#include "file_parsing.h"
|
7
|
+
#include "globals.h"
|
8
|
+
#include "utils.h"
|
9
|
+
|
10
|
+
HASHMAP_FUNCS_CREATE(entry, const entry_t, entry_t)
|
11
|
+
|
12
|
+
typedef int (*compare_fn)(const void *a, const void *b);
|
13
|
+
|
14
|
+
static size_t hashmap_hash_entry(const entry_t *entry) {
|
15
|
+
size_t hash = 0;
|
16
|
+
|
17
|
+
for (size_t i = 0; i < entry->json_size; i++) {
|
18
|
+
hash += *(const char *)(entry->json + i);
|
19
|
+
hash += (hash << 10);
|
20
|
+
hash ^= (hash >> 6);
|
21
|
+
}
|
22
|
+
hash += (hash << 3);
|
23
|
+
hash ^= (hash >> 11);
|
24
|
+
hash += (hash << 15);
|
25
|
+
return hash;
|
26
|
+
}
|
27
|
+
|
28
|
+
static int hashmap_compare_entry(const entry_t *a, const entry_t *b) {
|
29
|
+
if (a->json_size != b->json_size) {
|
30
|
+
return -1;
|
31
|
+
}
|
32
|
+
|
33
|
+
if (is_pid_significant(a) && (rb_str_equal(a->pid, b->pid) == Qfalse)) {
|
34
|
+
return -1;
|
35
|
+
}
|
36
|
+
|
37
|
+
return strncmp(a->json, b->json, a->json_size);
|
38
|
+
}
|
39
|
+
|
40
|
+
static entry_t *copy_entry(const entry_t *entry) {
|
41
|
+
entry_t *copied = (entry_t *)malloc(sizeof(entry_t));
|
42
|
+
if (copied == NULL) {
|
43
|
+
return NULL;
|
44
|
+
}
|
45
|
+
memcpy(copied, entry, sizeof(entry_t));
|
46
|
+
|
47
|
+
copied->json = malloc(entry->json_size);
|
48
|
+
if (copied->json == NULL) {
|
49
|
+
free(copied);
|
50
|
+
return NULL;
|
51
|
+
}
|
52
|
+
|
53
|
+
memcpy(copied->json, entry->json, entry->json_size);
|
54
|
+
|
55
|
+
return copied;
|
56
|
+
}
|
57
|
+
|
58
|
+
static void entry_free(entry_t *entry) {
|
59
|
+
free(entry->json);
|
60
|
+
free(entry);
|
61
|
+
}
|
62
|
+
|
63
|
+
static void merge_entry(entry_t *found, const entry_t *entry) {
|
64
|
+
if (entry->type == sym_gauge) {
|
65
|
+
if (entry->multiprocess_mode == sym_min) {
|
66
|
+
found->value = min(found->value, entry->value);
|
67
|
+
} else if (entry->multiprocess_mode == sym_max) {
|
68
|
+
found->value = max(found->value, entry->value);
|
69
|
+
} else if (entry->multiprocess_mode == sym_livesum) {
|
70
|
+
found->value += entry->value;
|
71
|
+
} else {
|
72
|
+
found->value = entry->value;
|
73
|
+
}
|
74
|
+
} else {
|
75
|
+
found->value += entry->value;
|
76
|
+
}
|
77
|
+
}
|
78
|
+
|
79
|
+
static int process_entry(struct hashmap *map, const entry_t *entry) {
|
80
|
+
entry_t *found = entry_hashmap_get(map, entry);
|
81
|
+
if (found) {
|
82
|
+
merge_entry(found, entry);
|
83
|
+
} else {
|
84
|
+
entry_t *copy = copy_entry(entry);
|
85
|
+
if (copy == NULL) {
|
86
|
+
save_exception(rb_eNoMemError, "Failed copying metrics entry");
|
87
|
+
return 0;
|
88
|
+
}
|
89
|
+
entry_hashmap_put(map, copy, copy); // use the hashmap like hashset actually
|
90
|
+
}
|
91
|
+
return 1;
|
92
|
+
}
|
93
|
+
|
94
|
+
inline entry_t entry_new(buffer_t *source, uint32_t pos, uint32_t encoded_len, file_t *file_info) {
|
95
|
+
entry_t entry = (entry_t){.json = source->buffer + pos,
|
96
|
+
.json_size = encoded_len,
|
97
|
+
.pid = file_info->pid,
|
98
|
+
.multiprocess_mode = file_info->multiprocess_mode,
|
99
|
+
.type = file_info->type,
|
100
|
+
.name_len = 0};
|
101
|
+
|
102
|
+
uint32_t value_offset = encoded_len + padding_length(encoded_len);
|
103
|
+
memcpy(&entry.value, entry.json + value_offset, sizeof(double));
|
104
|
+
|
105
|
+
return entry;
|
106
|
+
}
|
107
|
+
|
108
|
+
static int add_parsed_name(entry_t *entry) {
|
109
|
+
jsmn_parser parser;
|
110
|
+
jsmn_init(&parser);
|
111
|
+
|
112
|
+
jsmntok_t t[2] = {(jsmntok_t){}, (jsmntok_t){}};
|
113
|
+
jsmn_parse(&parser, entry->json, entry->json_size, t, 2);
|
114
|
+
|
115
|
+
jsmntok_t *name_tok = &t[1];
|
116
|
+
|
117
|
+
if (name_tok->start < name_tok->end && name_tok->start > 0) {
|
118
|
+
entry->name = entry->json + name_tok->start;
|
119
|
+
entry->name_len = name_tok->end - name_tok->start;
|
120
|
+
return 1;
|
121
|
+
}
|
122
|
+
return 0;
|
123
|
+
}
|
124
|
+
|
125
|
+
static int entry_lexical_comparator(const entry_t **a, const entry_t **b) {
|
126
|
+
size_t min_length = min((*a)->json_size, (*b)->json_size);
|
127
|
+
return strncmp((*a)->json, (*b)->json, min_length);
|
128
|
+
}
|
129
|
+
|
130
|
+
void hashmap_setup(struct hashmap *map) {
|
131
|
+
hashmap_init(map, (size_t(*)(const void *))hashmap_hash_entry,
|
132
|
+
(int (*)(const void *, const void *))hashmap_compare_entry, 1000);
|
133
|
+
}
|
134
|
+
|
135
|
+
void entries_destroy(struct hashmap *map) {
|
136
|
+
struct hashmap_iter *iter;
|
137
|
+
for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) {
|
138
|
+
entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter);
|
139
|
+
entry_free(entry);
|
140
|
+
}
|
141
|
+
}
|
142
|
+
|
143
|
+
int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map) {
|
144
|
+
if (source->size < START_POSITION) {
|
145
|
+
// nothing to read
|
146
|
+
return 1;
|
147
|
+
}
|
148
|
+
uint32_t used;
|
149
|
+
memcpy(&used, source->buffer, sizeof(uint32_t));
|
150
|
+
|
151
|
+
if (used > source->size) {
|
152
|
+
save_exception(prom_eParsingError, "source file %s corrupted, used %u > file size %u", file_info->path, used,
|
153
|
+
source->size);
|
154
|
+
return 0;
|
155
|
+
}
|
156
|
+
|
157
|
+
uint32_t pos = START_POSITION;
|
158
|
+
while (pos + sizeof(uint32_t) < used) {
|
159
|
+
uint32_t encoded_len;
|
160
|
+
memcpy(&encoded_len, source->buffer + pos, sizeof(uint32_t));
|
161
|
+
pos += sizeof(uint32_t);
|
162
|
+
|
163
|
+
uint32_t value_offset = encoded_len + padding_length(encoded_len);
|
164
|
+
|
165
|
+
if (pos + value_offset + sizeof(double) > used) {
|
166
|
+
save_exception(prom_eParsingError, "source file %s corrupted, used %u < stored data length %u",
|
167
|
+
file_info->path, used, pos + value_offset + sizeof(double));
|
168
|
+
return 0;
|
169
|
+
}
|
170
|
+
entry_t entry = entry_new(source, pos, encoded_len, file_info);
|
171
|
+
|
172
|
+
if (!process_entry(map, &entry)) {
|
173
|
+
entries_destroy(map);
|
174
|
+
return 0;
|
175
|
+
}
|
176
|
+
|
177
|
+
pos += value_offset + sizeof(double);
|
178
|
+
}
|
179
|
+
return 1;
|
180
|
+
}
|
181
|
+
|
182
|
+
int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries) {
|
183
|
+
size_t num = hashmap_size(map);
|
184
|
+
|
185
|
+
size_t list_size = num * sizeof(entry_t *);
|
186
|
+
entry_t **list = malloc(list_size);
|
187
|
+
|
188
|
+
if (list == NULL) {
|
189
|
+
save_exception(rb_eNoMemError, "Couldn't allocate %zu memory", list_size);
|
190
|
+
return 0;
|
191
|
+
}
|
192
|
+
|
193
|
+
size_t cnt = 0;
|
194
|
+
struct hashmap_iter *iter;
|
195
|
+
for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) {
|
196
|
+
entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter);
|
197
|
+
if (add_parsed_name(entry)) {
|
198
|
+
list[cnt] = entry;
|
199
|
+
cnt++;
|
200
|
+
}
|
201
|
+
}
|
202
|
+
if (cnt != num) {
|
203
|
+
save_exception(rb_eRuntimeError, "Processed entries %zu != map entries %zu", cnt, num);
|
204
|
+
free(list);
|
205
|
+
return 0;
|
206
|
+
}
|
207
|
+
|
208
|
+
qsort(list, cnt, sizeof(entry_t *), (compare_fn)&entry_lexical_comparator);
|
209
|
+
*sorted_entries = list;
|
210
|
+
return 1;
|
211
|
+
}
|
212
|
+
|
213
|
+
int is_pid_significant(const entry_t *e) {
|
214
|
+
ID mp = e->multiprocess_mode;
|
215
|
+
return e->type == sym_gauge && !(mp == sym_min || mp == sym_max || mp == sym_livesum);
|
216
|
+
}
|
@@ -0,0 +1,203 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <ruby/intern.h>
|
3
|
+
|
4
|
+
#include <errno.h>
|
5
|
+
#include <fcntl.h>
|
6
|
+
#include <sys/mman.h>
|
7
|
+
#include <unistd.h>
|
8
|
+
|
9
|
+
#include "file_format.h"
|
10
|
+
#include "mmap.h"
|
11
|
+
#include "value_access.h"
|
12
|
+
|
13
|
+
static int open_and_extend_file(mm_ipc *i_mm, size_t len) {
|
14
|
+
int fd;
|
15
|
+
|
16
|
+
if ((fd = open(i_mm->t->path, i_mm->t->smode)) == -1) {
|
17
|
+
rb_raise(rb_eArgError, "Can't open %s", i_mm->t->path);
|
18
|
+
}
|
19
|
+
|
20
|
+
if (lseek(fd, len - i_mm->t->len - 1, SEEK_END) == -1) {
|
21
|
+
close(fd);
|
22
|
+
rb_raise(rb_eIOError, "Can't lseek %zu", len - i_mm->t->len - 1);
|
23
|
+
}
|
24
|
+
|
25
|
+
if (write(fd, "\000", 1) != 1) {
|
26
|
+
close(fd);
|
27
|
+
rb_raise(rb_eIOError, "Can't extend %s", i_mm->t->path);
|
28
|
+
}
|
29
|
+
|
30
|
+
return fd;
|
31
|
+
}
|
32
|
+
|
33
|
+
static void expand(mm_ipc *i_mm, size_t len) {
|
34
|
+
if (len < i_mm->t->len) {
|
35
|
+
rb_raise(rb_eArgError, "Can't reduce the size of mmap");
|
36
|
+
}
|
37
|
+
|
38
|
+
if (munmap(i_mm->t->addr, i_mm->t->len)) {
|
39
|
+
rb_raise(rb_eArgError, "munmap failed");
|
40
|
+
}
|
41
|
+
|
42
|
+
int fd = open_and_extend_file(i_mm, len);
|
43
|
+
|
44
|
+
i_mm->t->addr = mmap(0, len, i_mm->t->pmode, i_mm->t->vscope, fd, i_mm->t->offset);
|
45
|
+
|
46
|
+
if (i_mm->t->addr == MAP_FAILED) {
|
47
|
+
close(fd);
|
48
|
+
rb_raise(rb_eArgError, "mmap failed");
|
49
|
+
}
|
50
|
+
|
51
|
+
if (close(fd) == -1) {
|
52
|
+
rb_raise(rb_eArgError, "Can't close %s", i_mm->t->path);
|
53
|
+
}
|
54
|
+
|
55
|
+
if ((i_mm->t->flag & MM_LOCK) && mlock(i_mm->t->addr, len) == -1) {
|
56
|
+
rb_raise(rb_eArgError, "mlock(%d)", errno);
|
57
|
+
}
|
58
|
+
i_mm->t->len = len;
|
59
|
+
i_mm->t->real = len;
|
60
|
+
}
|
61
|
+
|
62
|
+
static void save_entry(mm_ipc *i_mm, size_t offset, VALUE key, VALUE value) {
|
63
|
+
uint32_t key_length = (uint32_t)RSTRING_LEN(key);
|
64
|
+
|
65
|
+
char *pos = (char *)i_mm->t->addr + offset;
|
66
|
+
|
67
|
+
memcpy(pos, &key_length, sizeof(uint32_t));
|
68
|
+
pos += sizeof(uint32_t);
|
69
|
+
|
70
|
+
memmove(pos, StringValuePtr(key), key_length);
|
71
|
+
pos += key_length;
|
72
|
+
|
73
|
+
memset(pos, ' ', padding_length(key_length)); // TODO: consider padding with \0
|
74
|
+
pos += padding_length(key_length);
|
75
|
+
|
76
|
+
double val = NUM2DBL(value);
|
77
|
+
memcpy(pos, &val, sizeof(double));
|
78
|
+
}
|
79
|
+
|
80
|
+
static void save_value(mm_ipc *i_mm, VALUE _offset, VALUE value) {
|
81
|
+
Check_Type(_offset, T_FIXNUM);
|
82
|
+
size_t offset = NUM2UINT(_offset);
|
83
|
+
if ((i_mm->t->real + sizeof(double)) <= offset) {
|
84
|
+
rb_raise(rb_eIndexError, "offset %zu out of string", offset);
|
85
|
+
}
|
86
|
+
|
87
|
+
if (i_mm->t->flag & MM_FROZEN) {
|
88
|
+
rb_error_frozen("mmap");
|
89
|
+
}
|
90
|
+
|
91
|
+
char *pos = (char *)i_mm->t->addr + offset;
|
92
|
+
|
93
|
+
double val = NUM2DBL(value);
|
94
|
+
memcpy(pos, &val, sizeof(double));
|
95
|
+
}
|
96
|
+
|
97
|
+
static VALUE load_value(mm_ipc *i_mm, VALUE _offset) {
|
98
|
+
Check_Type(_offset, T_FIXNUM);
|
99
|
+
size_t offset = NUM2UINT(_offset);
|
100
|
+
if ((i_mm->t->real + sizeof(double)) <= offset) {
|
101
|
+
rb_raise(rb_eIndexError, "offset %zu out of string", offset);
|
102
|
+
}
|
103
|
+
|
104
|
+
char *pos = (char *)i_mm->t->addr + offset;
|
105
|
+
|
106
|
+
double value;
|
107
|
+
memcpy(&value, pos, sizeof(double));
|
108
|
+
return DBL2NUM(value);
|
109
|
+
}
|
110
|
+
|
111
|
+
inline uint32_t load_used(mm_ipc *i_mm) {
|
112
|
+
uint32_t used = *((uint32_t *)i_mm->t->addr);
|
113
|
+
|
114
|
+
if (used == 0) {
|
115
|
+
used = START_POSITION;
|
116
|
+
}
|
117
|
+
return used;
|
118
|
+
}
|
119
|
+
|
120
|
+
inline void save_used(mm_ipc *i_mm, uint32_t used) { *((uint32_t *)i_mm->t->addr) = used; }
|
121
|
+
|
122
|
+
static VALUE initialize_entry(mm_ipc *i_mm, VALUE positions, VALUE key, VALUE value) {
|
123
|
+
if (i_mm->t->flag & MM_FROZEN) {
|
124
|
+
rb_error_frozen("mmap");
|
125
|
+
}
|
126
|
+
|
127
|
+
if (RSTRING_LEN(key) > INT32_MAX) {
|
128
|
+
rb_raise(rb_eArgError, "string length gt %d", INT32_MAX);
|
129
|
+
}
|
130
|
+
|
131
|
+
uint32_t key_length = (uint32_t)RSTRING_LEN(key);
|
132
|
+
uint32_t value_offset = sizeof(uint32_t) + key_length + padding_length(key_length);
|
133
|
+
uint32_t entry_length = value_offset + sizeof(double);
|
134
|
+
|
135
|
+
uint32_t used = load_used(i_mm);
|
136
|
+
while (i_mm->t->len < (used + entry_length)) {
|
137
|
+
expand(i_mm, i_mm->t->len * 2);
|
138
|
+
}
|
139
|
+
save_entry(i_mm, used, key, value);
|
140
|
+
save_used(i_mm, used + entry_length);
|
141
|
+
|
142
|
+
return rb_hash_aset(positions, key, INT2NUM(used + value_offset));
|
143
|
+
}
|
144
|
+
|
145
|
+
VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value) {
|
146
|
+
Check_Type(positions, T_HASH);
|
147
|
+
Check_Type(key, T_STRING);
|
148
|
+
|
149
|
+
mm_ipc *i_mm;
|
150
|
+
GET_MMAP(self, i_mm, MM_MODIFY);
|
151
|
+
|
152
|
+
VALUE position = rb_hash_lookup(positions, key);
|
153
|
+
|
154
|
+
if (position != Qnil) {
|
155
|
+
return load_value(i_mm, position);
|
156
|
+
}
|
157
|
+
|
158
|
+
position = initialize_entry(i_mm, positions, key, default_value);
|
159
|
+
return load_value(i_mm, position);
|
160
|
+
}
|
161
|
+
|
162
|
+
VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value) {
|
163
|
+
Check_Type(positions, T_HASH);
|
164
|
+
Check_Type(key, T_STRING);
|
165
|
+
|
166
|
+
mm_ipc *i_mm;
|
167
|
+
GET_MMAP(self, i_mm, MM_MODIFY);
|
168
|
+
|
169
|
+
VALUE position = rb_hash_lookup(positions, key);
|
170
|
+
|
171
|
+
if (position != Qnil) {
|
172
|
+
save_value(i_mm, position, value);
|
173
|
+
return load_value(i_mm, position);
|
174
|
+
}
|
175
|
+
|
176
|
+
position = initialize_entry(i_mm, positions, key, value);
|
177
|
+
return load_value(i_mm, position);
|
178
|
+
}
|
179
|
+
|
180
|
+
VALUE method_load_used(VALUE self) {
|
181
|
+
mm_ipc *i_mm;
|
182
|
+
|
183
|
+
GET_MMAP(self, i_mm, MM_MODIFY);
|
184
|
+
return UINT2NUM(load_used(i_mm));
|
185
|
+
}
|
186
|
+
|
187
|
+
VALUE method_save_used(VALUE self, VALUE value) {
|
188
|
+
Check_Type(value, T_FIXNUM);
|
189
|
+
mm_ipc *i_mm;
|
190
|
+
|
191
|
+
GET_MMAP(self, i_mm, MM_MODIFY);
|
192
|
+
|
193
|
+
if (i_mm->t->flag & MM_FROZEN) {
|
194
|
+
rb_error_frozen("mmap");
|
195
|
+
}
|
196
|
+
|
197
|
+
if (i_mm->t->len < INITIAL_SIZE) {
|
198
|
+
expand(i_mm, INITIAL_SIZE);
|
199
|
+
}
|
200
|
+
|
201
|
+
save_used(i_mm, NUM2UINT(value));
|
202
|
+
return value;
|
203
|
+
}
|
data/lib/prometheus/client.rb
CHANGED
@@ -29,6 +29,11 @@ module Prometheus
|
|
29
29
|
configuration.pid_provider.call
|
30
30
|
end
|
31
31
|
|
32
|
+
def reset!
|
33
|
+
@registry = nil
|
34
|
+
::Prometheus::Client::MmapedValue.reset_and_reinitialize
|
35
|
+
end
|
36
|
+
|
32
37
|
def reinitialize_on_pid_change
|
33
38
|
::Prometheus::Client::MmapedValue.reinitialize_on_pid_change
|
34
39
|
end
|
@@ -0,0 +1,79 @@
|
|
1
|
+
require 'prometheus/client/metric'
|
2
|
+
require 'prometheus/client/uses_value_type'
|
3
|
+
|
4
|
+
module Prometheus
|
5
|
+
module Client
|
6
|
+
# A histogram samples observations (usually things like request durations
|
7
|
+
# or response sizes) and counts them in configurable buckets. It also
|
8
|
+
# provides a sum of all observed values.
|
9
|
+
class Histogram < Metric
|
10
|
+
# Value represents the state of a Histogram at a given point.
|
11
|
+
class Value < Hash
|
12
|
+
include UsesValueType
|
13
|
+
attr_accessor :sum, :total
|
14
|
+
|
15
|
+
def initialize(type, name, labels, buckets)
|
16
|
+
@sum = value_object(type, name, "#{name}_sum", labels)
|
17
|
+
# TODO: get rid of total and use +Inf bucket instead.
|
18
|
+
@total = value_object(type, name, "#{name}_count", labels)
|
19
|
+
|
20
|
+
buckets.each do |bucket|
|
21
|
+
self[bucket] = value_object(type, name, "#{name}_bucket", labels.merge({ :le => bucket.to_s }))
|
22
|
+
end
|
23
|
+
end
|
24
|
+
|
25
|
+
def observe(value)
|
26
|
+
@sum.increment(value)
|
27
|
+
@total.increment()
|
28
|
+
|
29
|
+
each_key do |bucket|
|
30
|
+
self[bucket].increment() if value <= bucket
|
31
|
+
end
|
32
|
+
end
|
33
|
+
|
34
|
+
def get()
|
35
|
+
hash = {}
|
36
|
+
each_key do |bucket|
|
37
|
+
hash[bucket] = self[bucket].get()
|
38
|
+
end
|
39
|
+
hash
|
40
|
+
end
|
41
|
+
end
|
42
|
+
|
43
|
+
# DEFAULT_BUCKETS are the default Histogram buckets. The default buckets
|
44
|
+
# are tailored to broadly measure the response time (in seconds) of a
|
45
|
+
# network service. (From DefBuckets client_golang)
|
46
|
+
DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1,
|
47
|
+
2.5, 5, 10].freeze
|
48
|
+
|
49
|
+
# Offer a way to manually specify buckets
|
50
|
+
def initialize(name, docstring, base_labels = {},
|
51
|
+
buckets = DEFAULT_BUCKETS)
|
52
|
+
raise ArgumentError, 'Unsorted buckets, typo?' unless sorted? buckets
|
53
|
+
|
54
|
+
@buckets = buckets
|
55
|
+
super(name, docstring, base_labels)
|
56
|
+
end
|
57
|
+
|
58
|
+
def type
|
59
|
+
:histogram
|
60
|
+
end
|
61
|
+
|
62
|
+
def observe(labels, value)
|
63
|
+
label_set = label_set_for(labels)
|
64
|
+
synchronize { @values[label_set].observe(value) }
|
65
|
+
end
|
66
|
+
|
67
|
+
private
|
68
|
+
|
69
|
+
def default(labels)
|
70
|
+
# TODO: default function needs to know key of hash info (label names and values)
|
71
|
+
Value.new(type, @name, labels, @buckets)
|
72
|
+
end
|
73
|
+
|
74
|
+
def sorted?(bucket)
|
75
|
+
bucket.each_cons(2).all? { |i, j| i <= j }
|
76
|
+
end
|
77
|
+
end
|
78
|
+
end
|
79
|
+
end
|