raindrops-maintained 0.21.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.document +7 -0
- data/.gitattributes +4 -0
- data/.gitignore +16 -0
- data/.manifest +62 -0
- data/.olddoc.yml +16 -0
- data/COPYING +165 -0
- data/GIT-VERSION-FILE +1 -0
- data/GIT-VERSION-GEN +40 -0
- data/GNUmakefile +4 -0
- data/LATEST +9 -0
- data/LICENSE +16 -0
- data/NEWS +384 -0
- data/README +101 -0
- data/TODO +3 -0
- data/archive/.gitignore +3 -0
- data/archive/slrnpull.conf +4 -0
- data/examples/linux-listener-stats.rb +122 -0
- data/examples/middleware.ru +5 -0
- data/examples/watcher.ru +4 -0
- data/examples/watcher_demo.ru +13 -0
- data/examples/yahns.conf.rb +30 -0
- data/examples/zbatery.conf.rb +16 -0
- data/ext/raindrops/extconf.rb +163 -0
- data/ext/raindrops/linux_inet_diag.c +713 -0
- data/ext/raindrops/my_fileno.h +16 -0
- data/ext/raindrops/raindrops.c +487 -0
- data/ext/raindrops/raindrops_atomic.h +23 -0
- data/ext/raindrops/tcp_info.c +245 -0
- data/lib/raindrops/aggregate/last_data_recv.rb +94 -0
- data/lib/raindrops/aggregate/pmq.rb +245 -0
- data/lib/raindrops/aggregate.rb +8 -0
- data/lib/raindrops/last_data_recv.rb +102 -0
- data/lib/raindrops/linux.rb +77 -0
- data/lib/raindrops/middleware/proxy.rb +40 -0
- data/lib/raindrops/middleware.rb +153 -0
- data/lib/raindrops/struct.rb +62 -0
- data/lib/raindrops/watcher.rb +428 -0
- data/lib/raindrops.rb +72 -0
- data/pkg.mk +151 -0
- data/raindrops-maintained.gemspec +1 -0
- data/raindrops.gemspec +26 -0
- data/setup.rb +1586 -0
- data/test/ipv6_enabled.rb +9 -0
- data/test/rack_unicorn.rb +11 -0
- data/test/test_aggregate_pmq.rb +65 -0
- data/test/test_inet_diag_socket.rb +16 -0
- data/test/test_last_data_recv.rb +57 -0
- data/test/test_last_data_recv_unicorn.rb +69 -0
- data/test/test_linux.rb +281 -0
- data/test/test_linux_all_tcp_listen_stats.rb +66 -0
- data/test/test_linux_all_tcp_listen_stats_leak.rb +43 -0
- data/test/test_linux_ipv6.rb +166 -0
- data/test/test_linux_middleware.rb +64 -0
- data/test/test_linux_reuseport_tcp_listen_stats.rb +51 -0
- data/test/test_middleware.rb +128 -0
- data/test/test_middleware_unicorn.rb +37 -0
- data/test/test_middleware_unicorn_ipv6.rb +37 -0
- data/test/test_raindrops.rb +207 -0
- data/test/test_raindrops_gc.rb +38 -0
- data/test/test_struct.rb +54 -0
- data/test/test_tcp_info.rb +88 -0
- data/test/test_watcher.rb +186 -0
- metadata +193 -0
@@ -0,0 +1,487 @@
|
|
1
|
+
#include <ruby.h>
|
2
|
+
#include <unistd.h>
|
3
|
+
#include <sys/mman.h>
|
4
|
+
#include <assert.h>
|
5
|
+
#include <errno.h>
|
6
|
+
#include <stddef.h>
|
7
|
+
#include <string.h>
|
8
|
+
#include "raindrops_atomic.h"
|
9
|
+
|
10
|
+
/* compatibility shims: older ruby.h may not provide size_t conversions */
#ifndef SIZET2NUM
# define SIZET2NUM(x) ULONG2NUM(x)
#endif
#ifndef NUM2SIZET
# define NUM2SIZET(x) NUM2ULONG(x)
#endif

/*
 * most modern CPUs have a cache-line size of 64 or 128.
 * We choose a bigger one by default since our structure is not
 * heavily used
 */
static size_t raindrop_size = 128;
/* system page size, detected at extension load time in Init_raindrops_ext */
static size_t rd_page_size;

/* both macros assume rd_page_size is a power of two (true on real systems) */
#define PAGE_MASK (~(rd_page_size - 1))
#define PAGE_ALIGN(addr) (((addr) + rd_page_size - 1) & PAGE_MASK)
|
27
|
+
|
28
|
+
/* each raindrop is a counter */
struct raindrop {
	unsigned long counter;
} __attribute__((packed)); /* packed: slot spacing is controlled by raindrop_size, not by struct padding */
|
32
|
+
|
33
|
+
/* allow mmap-ed regions to store more than one raindrop */
struct raindrops {
	size_t size;            /* number of slots currently exposed to Ruby */
	size_t capa;            /* page-aligned slot capacity (always >= size) */
	pid_t pid;              /* PID of the process that created the mapping */
	VALUE io;               /* backing IO object, or Qnil for anonymous memory */
	struct raindrop *drops; /* mmap-ed region; MAP_FAILED when not mapped */
};
|
41
|
+
|
42
|
+
/* called by GC */
|
43
|
+
static void rd_mark(void *ptr)
|
44
|
+
{
|
45
|
+
struct raindrops *r = ptr;
|
46
|
+
rb_gc_mark(r->io);
|
47
|
+
}
|
48
|
+
|
49
|
+
/* called by GC */
|
50
|
+
static void rd_free(void *ptr)
|
51
|
+
{
|
52
|
+
struct raindrops *r = ptr;
|
53
|
+
|
54
|
+
if (r->drops != MAP_FAILED) {
|
55
|
+
int rv = munmap(r->drops, raindrop_size * r->capa);
|
56
|
+
if (rv != 0)
|
57
|
+
rb_bug("munmap failed in gc: %s", strerror(errno));
|
58
|
+
}
|
59
|
+
|
60
|
+
xfree(ptr);
|
61
|
+
}
|
62
|
+
|
63
|
+
static size_t rd_memsize(const void *ptr)
|
64
|
+
{
|
65
|
+
const struct raindrops *r = ptr;
|
66
|
+
|
67
|
+
return r->drops == MAP_FAILED ? 0 : raindrop_size * r->capa;
|
68
|
+
}
|
69
|
+
|
70
|
+
static const rb_data_type_t rd_type = {
|
71
|
+
"raindrops",
|
72
|
+
{ rd_mark, rd_free, rd_memsize, /* reserved */ },
|
73
|
+
/* parent, data, [ flags ] */
|
74
|
+
};
|
75
|
+
|
76
|
+
/* automatically called at creation (before initialize) */
|
77
|
+
static VALUE alloc(VALUE klass)
|
78
|
+
{
|
79
|
+
struct raindrops *r;
|
80
|
+
VALUE rv = TypedData_Make_Struct(klass, struct raindrops, &rd_type, r);
|
81
|
+
|
82
|
+
r->drops = MAP_FAILED;
|
83
|
+
return rv;
|
84
|
+
}
|
85
|
+
|
86
|
+
/* fetch the wrapped struct, raising if the mapping is gone (evaporated) */
static struct raindrops *get(VALUE self)
{
	struct raindrops *r;

	TypedData_Get_Struct(self, struct raindrops, &rd_type, r);

	if (r->drops == MAP_FAILED)
		rb_raise(rb_eStandardError, "invalid or freed Raindrops");

	return r;
}
|
97
|
+
|
98
|
+
/*
|
99
|
+
* This is the _actual_ implementation of #initialize - the Ruby wrapper
|
100
|
+
* handles keyword-argument handling then calls this method.
|
101
|
+
*/
|
102
|
+
static VALUE init_cimpl(VALUE self, VALUE size, VALUE io, VALUE zero)
|
103
|
+
{
|
104
|
+
struct raindrops *r = DATA_PTR(self);
|
105
|
+
int tries = 1;
|
106
|
+
size_t tmp;
|
107
|
+
|
108
|
+
if (r->drops != MAP_FAILED)
|
109
|
+
rb_raise(rb_eRuntimeError, "already initialized");
|
110
|
+
|
111
|
+
r->size = NUM2SIZET(size);
|
112
|
+
if (r->size < 1)
|
113
|
+
rb_raise(rb_eArgError, "size must be >= 1");
|
114
|
+
|
115
|
+
tmp = PAGE_ALIGN(raindrop_size * r->size);
|
116
|
+
r->capa = tmp / raindrop_size;
|
117
|
+
assert(PAGE_ALIGN(raindrop_size * r->capa) == tmp && "not aligned");
|
118
|
+
|
119
|
+
r->io = io;
|
120
|
+
|
121
|
+
retry:
|
122
|
+
if (RTEST(r->io)) {
|
123
|
+
int fd = NUM2INT(rb_funcall(r->io, rb_intern("fileno"), 0));
|
124
|
+
rb_funcall(r->io, rb_intern("truncate"), 1, SIZET2NUM(tmp));
|
125
|
+
r->drops = mmap(NULL, tmp,
|
126
|
+
PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
|
127
|
+
} else {
|
128
|
+
r->drops = mmap(NULL, tmp,
|
129
|
+
PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED,
|
130
|
+
-1, 0);
|
131
|
+
}
|
132
|
+
if (r->drops == MAP_FAILED) {
|
133
|
+
int err = errno;
|
134
|
+
|
135
|
+
if ((err == EAGAIN || err == ENOMEM) && tries-- > 0) {
|
136
|
+
rb_gc();
|
137
|
+
goto retry;
|
138
|
+
}
|
139
|
+
rb_sys_fail("mmap");
|
140
|
+
}
|
141
|
+
r->pid = getpid();
|
142
|
+
|
143
|
+
if (RTEST(zero))
|
144
|
+
memset(r->drops, 0, tmp);
|
145
|
+
|
146
|
+
return self;
|
147
|
+
}
|
148
|
+
|
149
|
+
/*
 * mremap() is currently broken with MAP_SHARED
 * https://bugzilla.kernel.org/show_bug.cgi?id=8691
 */
#if defined(HAVE_MREMAP) && !defined(MREMAP_WORKS_WITH_MAP_SHARED)
# undef HAVE_MREMAP
#endif

#ifdef HAVE_MREMAP
#ifndef MREMAP_MAYMOVE
# warn MREMAP_MAYMOVE undefined
# define MREMAP_MAYMOVE 0
#endif
/* grow (or shrink) the shared mapping in place, moving it if the kernel must */
static void resize(struct raindrops *r, size_t new_rd_size)
{
	size_t old_size = raindrop_size * r->capa;
	size_t new_size = PAGE_ALIGN(raindrop_size * new_rd_size);
	void *old_address = r->drops;
	void *rv;

	/* old_address is only meaningful in the process that mapped it */
	if (r->pid != getpid())
		rb_raise(rb_eRuntimeError, "cannot mremap() from child");

	rv = mremap(old_address, old_size, new_size, MREMAP_MAYMOVE);
	if (rv == MAP_FAILED) {
		int err = errno;

		if (err == EAGAIN || err == ENOMEM) {
			rb_gc();
			/*
			 * NOTE(review): the retry passes flags=0 (no
			 * MREMAP_MAYMOVE), so it can only grow in place --
			 * presumably intentional; confirm against upstream
			 */
			rv = mremap(old_address, old_size, new_size, 0);
		}
		if (rv == MAP_FAILED)
			rb_sys_fail("mremap");
	}
	r->drops = rv;
	r->size = new_rd_size;
	r->capa = new_size / raindrop_size;
	assert(r->capa >= r->size && "bad sizing");
}
#else /* ! HAVE_MREMAP */
/*
 * we cannot use munmap + mmap to reallocate the buffer since it may
 * already be shared by other processes, so we just fail
 */
static void resize(struct raindrops *r, size_t new_rd_size)
{
	rb_raise(rb_eRangeError, "mremap(2) is not available");
}
#endif /* ! HAVE_MREMAP */
|
198
|
+
|
199
|
+
/*
|
200
|
+
* call-seq:
|
201
|
+
* rd.size = new_size
|
202
|
+
*
|
203
|
+
* Increases or decreases the current capacity of our Raindrop.
|
204
|
+
* Raises RangeError if +new_size+ is too big or small for the
|
205
|
+
* current backing store
|
206
|
+
*/
|
207
|
+
static VALUE setsize(VALUE self, VALUE new_size)
|
208
|
+
{
|
209
|
+
size_t new_rd_size = NUM2SIZET(new_size);
|
210
|
+
struct raindrops *r = get(self);
|
211
|
+
|
212
|
+
if (new_rd_size <= r->capa)
|
213
|
+
r->size = new_rd_size;
|
214
|
+
else
|
215
|
+
resize(r, new_rd_size);
|
216
|
+
|
217
|
+
return new_size;
|
218
|
+
}
|
219
|
+
|
220
|
+
/*
|
221
|
+
* call-seq:
|
222
|
+
* rd.capa -> Integer
|
223
|
+
*
|
224
|
+
* Returns the number of slots allocated (but not necessarily used) by
|
225
|
+
* the Raindrops object.
|
226
|
+
*/
|
227
|
+
static VALUE capa(VALUE self)
|
228
|
+
{
|
229
|
+
return SIZET2NUM(get(self)->capa);
|
230
|
+
}
|
231
|
+
|
232
|
+
/*
 * call-seq:
 *	rd.dup -> rd_copy
 *
 * Duplicates and snapshots the current state of a Raindrops object. Even
 * if the given Raindrops object is backed by a file, the copy will be backed
 * by independent, anonymously mapped memory.
 */
static VALUE init_copy(VALUE dest, VALUE source)
{
	struct raindrops *dst = DATA_PTR(dest);
	struct raindrops *src = get(source);

	/* io=Qnil forces an anonymous mapping; zero=Qfalse since we
	 * immediately overwrite every used slot below */
	init_cimpl(dest, SIZET2NUM(src->size), Qnil, Qfalse);
	memcpy(dst->drops, src->drops, raindrop_size * src->size);

	return dest;
}
|
250
|
+
|
251
|
+
/*
 * Returns the address of the counter slot designated by +index+.
 * Raises ArgumentError when +index+ is beyond the current size.
 */
static unsigned long *addr_of(VALUE self, VALUE index)
{
	struct raindrops *r = get(self);
	unsigned long off = FIX2ULONG(index) * raindrop_size;

	if (off >= raindrop_size * r->size)
		rb_raise(rb_eArgError, "offset overrun");

	/*
	 * char * arithmetic instead of a pointer -> unsigned long round-trip:
	 * the integer detour can truncate on platforms where pointers are
	 * wider than long (e.g. LLP64)
	 */
	return (unsigned long *)((char *)r->drops + off);
}
|
261
|
+
|
262
|
+
static unsigned long incr_decr_arg(int argc, const VALUE *argv)
|
263
|
+
{
|
264
|
+
if (argc > 2 || argc < 1)
|
265
|
+
rb_raise(rb_eArgError,
|
266
|
+
"wrong number of arguments (%d for 1+)", argc);
|
267
|
+
|
268
|
+
return argc == 2 ? NUM2ULONG(argv[1]) : 1;
|
269
|
+
}
|
270
|
+
|
271
|
+
/*
|
272
|
+
* call-seq:
|
273
|
+
* rd.incr(index[, number]) -> result
|
274
|
+
*
|
275
|
+
* Increments the value referred to by the +index+ by +number+.
|
276
|
+
* +number+ defaults to +1+ if unspecified.
|
277
|
+
*/
|
278
|
+
static VALUE incr(int argc, VALUE *argv, VALUE self)
|
279
|
+
{
|
280
|
+
unsigned long nr = incr_decr_arg(argc, argv);
|
281
|
+
|
282
|
+
return ULONG2NUM(__sync_add_and_fetch(addr_of(self, argv[0]), nr));
|
283
|
+
}
|
284
|
+
|
285
|
+
/*
|
286
|
+
* call-seq:
|
287
|
+
* rd.decr(index[, number]) -> result
|
288
|
+
*
|
289
|
+
* Decrements the value referred to by the +index+ by +number+.
|
290
|
+
* +number+ defaults to +1+ if unspecified.
|
291
|
+
*/
|
292
|
+
static VALUE decr(int argc, VALUE *argv, VALUE self)
|
293
|
+
{
|
294
|
+
unsigned long nr = incr_decr_arg(argc, argv);
|
295
|
+
|
296
|
+
return ULONG2NUM(__sync_sub_and_fetch(addr_of(self, argv[0]), nr));
|
297
|
+
}
|
298
|
+
|
299
|
+
/*
|
300
|
+
* call-seq:
|
301
|
+
* rd.to_ary -> Array
|
302
|
+
*
|
303
|
+
* converts the Raindrops structure to an Array
|
304
|
+
*/
|
305
|
+
static VALUE to_ary(VALUE self)
|
306
|
+
{
|
307
|
+
struct raindrops *r = get(self);
|
308
|
+
VALUE rv = rb_ary_new2(r->size);
|
309
|
+
size_t i;
|
310
|
+
unsigned long base = (unsigned long)r->drops;
|
311
|
+
|
312
|
+
for (i = 0; i < r->size; i++) {
|
313
|
+
rb_ary_push(rv, ULONG2NUM(*((unsigned long *)base)));
|
314
|
+
base += raindrop_size;
|
315
|
+
}
|
316
|
+
|
317
|
+
return rv;
|
318
|
+
}
|
319
|
+
|
320
|
+
/*
|
321
|
+
* call-seq:
|
322
|
+
* rd.size -> Integer
|
323
|
+
*
|
324
|
+
* Returns the number of counters a Raindrops object can hold. Due to
|
325
|
+
* page alignment, this is always equal or greater than the number of
|
326
|
+
* requested slots passed to Raindrops.new
|
327
|
+
*/
|
328
|
+
static VALUE size(VALUE self)
|
329
|
+
{
|
330
|
+
return SIZET2NUM(get(self)->size);
|
331
|
+
}
|
332
|
+
|
333
|
+
/*
|
334
|
+
* call-seq:
|
335
|
+
* rd[index] = value
|
336
|
+
*
|
337
|
+
* Assigns +value+ to the slot designated by +index+
|
338
|
+
*/
|
339
|
+
static VALUE aset(VALUE self, VALUE index, VALUE value)
|
340
|
+
{
|
341
|
+
unsigned long *addr = addr_of(self, index);
|
342
|
+
|
343
|
+
*addr = NUM2ULONG(value);
|
344
|
+
|
345
|
+
return value;
|
346
|
+
}
|
347
|
+
|
348
|
+
/*
|
349
|
+
* call-seq:
|
350
|
+
* rd[index] -> value
|
351
|
+
*
|
352
|
+
* Returns the value of the slot designated by +index+
|
353
|
+
*/
|
354
|
+
static VALUE aref(VALUE self, VALUE index)
|
355
|
+
{
|
356
|
+
return ULONG2NUM(*addr_of(self, index));
|
357
|
+
}
|
358
|
+
|
359
|
+
/* sub-extension initializers defined in linux_inet_diag.c / tcp_info.c */
#ifdef __linux__
void Init_raindrops_linux_inet_diag(void);
#endif
#ifdef HAVE_TYPE_STRUCT_TCP_INFO
void Init_raindrops_tcp_info(void);
#endif

/* fall back to whichever CPU-count sysconf name this platform provides */
#ifndef _SC_NPROCESSORS_CONF
#  if defined _SC_NPROCESSORS_ONLN
#    define _SC_NPROCESSORS_CONF _SC_NPROCESSORS_ONLN
#  elif defined _SC_NPROC_ONLN
#    define _SC_NPROCESSORS_CONF _SC_NPROC_ONLN
#  elif defined _SC_CRAY_NCPU
#    define _SC_NPROCESSORS_CONF _SC_CRAY_NCPU
#  endif
#endif
|
375
|
+
|
376
|
+
/*
 * call-seq:
 *	rd.evaporate! -> nil
 *
 * Releases mmap()-ed memory allocated for the Raindrops object back
 * to the OS. The Ruby garbage collector will also release memory
 * automatically when it is not needed, but this forces release
 * under high memory pressure.
 */
static VALUE evaporate_bang(VALUE self)
{
	struct raindrops *r = get(self);
	void *addr = r->drops;

	/* invalidate first so rd_free() will not munmap() a second time,
	 * even if munmap() below fails */
	r->drops = MAP_FAILED;
	if (munmap(addr, raindrop_size * r->capa) != 0)
		rb_sys_fail("munmap");
	return Qnil;
}
|
395
|
+
|
396
|
+
/*
|
397
|
+
* call-seq:
|
398
|
+
* to_io -> IO
|
399
|
+
*
|
400
|
+
* Returns the IO object backing the memory for this raindrop, if
|
401
|
+
* one was specified when constructing this Raindrop. If this
|
402
|
+
* Raindrop is backed by anonymous memory, this method returns nil.
|
403
|
+
*/
|
404
|
+
static VALUE to_io(VALUE self)
|
405
|
+
{
|
406
|
+
struct raindrops *r = get(self);
|
407
|
+
return r->io;
|
408
|
+
}
|
409
|
+
|
410
|
+
/* extension entry point: detects CPU/cache/page geometry, then registers
 * the Raindrops class, its constants, and its methods */
void Init_raindrops_ext(void)
{
	VALUE cRaindrops = rb_define_class("Raindrops", rb_cObject);
	long tmp = 2; /* assume SMP if the CPU count is undetectable */

#ifdef _SC_NPROCESSORS_CONF
	tmp = sysconf(_SC_NPROCESSORS_CONF);
#endif
	/* no point in padding on single CPU machines */
	if (tmp == 1)
		raindrop_size = sizeof(unsigned long);
#ifdef _SC_LEVEL1_DCACHE_LINESIZE
	/* pad each slot to the L1 data-cache line size when detectable */
	if (tmp != 1) {
		tmp = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
		if (tmp > 0)
			raindrop_size = (size_t)tmp;
	}
#endif
	/* page-size detection, most-portable name first */
#if defined(_SC_PAGE_SIZE)
	rd_page_size = (size_t)sysconf(_SC_PAGE_SIZE);
#elif defined(_SC_PAGESIZE)
	rd_page_size = (size_t)sysconf(_SC_PAGESIZE);
#elif defined(HAVE_GETPAGESIZE)
	rd_page_size = (size_t)getpagesize();
#elif defined(PAGE_SIZE)
	rd_page_size = (size_t)PAGE_SIZE;
#elif defined(PAGESIZE)
	rd_page_size = (size_t)PAGESIZE;
#else
# error unable to detect page size for mmap()
#endif
	/* (size_t)-1 is sysconf()'s error return cast to size_t */
	if ((rd_page_size == (size_t)-1) || (rd_page_size < raindrop_size))
		rb_raise(rb_eRuntimeError,
			 "system page size invalid: %llu",
			 (unsigned long long)rd_page_size);

	/*
	 * The size of one page of memory for a mmap()-ed Raindrops region.
	 * Typically 4096 bytes under Linux.
	 */
	rb_define_const(cRaindrops, "PAGE_SIZE", SIZET2NUM(rd_page_size));

	/*
	 * The size (in bytes) of a slot in a Raindrops object.
	 * This is the size of a word on single CPU systems and
	 * the size of the L1 cache line size if detectable.
	 *
	 * Defaults to 128 bytes if undetectable.
	 */
	rb_define_const(cRaindrops, "SIZE", SIZET2NUM(raindrop_size));

	/*
	 * The maximum value a raindrop counter can hold
	 */
	rb_define_const(cRaindrops, "MAX", ULONG2NUM((unsigned long)-1));

	rb_define_alloc_func(cRaindrops, alloc);

	/* private: the public #initialize wrapper lives in Ruby code */
	rb_define_private_method(cRaindrops, "initialize_cimpl", init_cimpl, 3);
	rb_define_method(cRaindrops, "incr", incr, -1);
	rb_define_method(cRaindrops, "decr", decr, -1);
	rb_define_method(cRaindrops, "to_ary", to_ary, 0);
	rb_define_method(cRaindrops, "[]", aref, 1);
	rb_define_method(cRaindrops, "[]=", aset, 2);
	rb_define_method(cRaindrops, "size", size, 0);
	rb_define_method(cRaindrops, "size=", setsize, 1);
	rb_define_method(cRaindrops, "capa", capa, 0);
	rb_define_method(cRaindrops, "initialize_copy", init_copy, 1);
	rb_define_method(cRaindrops, "evaporate!", evaporate_bang, 0);
	rb_define_method(cRaindrops, "to_io", to_io, 0);

#ifdef __linux__
	Init_raindrops_linux_inet_diag();
#endif
#ifdef HAVE_TYPE_STRUCT_TCP_INFO
	Init_raindrops_tcp_info();
#endif
}
|
@@ -0,0 +1,23 @@
|
|
1
|
+
/*
 * use wrappers around libatomic-ops for folks that don't have GCC
 * or a new enough version of GCC
 *
 * NOTE(review): these deliberately define the reserved __sync_* names so
 * callers can use the GCC builtin spelling unconditionally; only compiled
 * when the real builtins are absent.
 */
#ifndef HAVE_GCC_ATOMIC_BUILTINS
#include <atomic_ops.h>

/* emulate GCC __sync_add_and_fetch: atomically add, return the NEW value
 * (AO_fetch_and_add returns the OLD value, hence the "+ incr") */
static inline unsigned long
__sync_add_and_fetch(unsigned long *dst, unsigned long incr)
{
	AO_t tmp = AO_fetch_and_add((AO_t *)dst, (AO_t)incr);

	return (unsigned long)tmp + incr;
}

/* emulate GCC __sync_sub_and_fetch via addition of the negated operand */
static inline unsigned long
__sync_sub_and_fetch(unsigned long *dst, unsigned long incr)
{
	AO_t tmp = AO_fetch_and_add((AO_t *)dst, (AO_t)(-(long)incr));

	return (unsigned long)tmp - incr;
}
#endif /* HAVE_GCC_ATOMIC_BUILTINS */
|