zookeeper-ng 1.5
- checksums.yaml +7 -0
- data/.ctags_paths +1 -0
- data/.dotfiles/ruby-gemset +1 -0
- data/.dotfiles/ruby-version +1 -0
- data/.dotfiles/rvmrc +2 -0
- data/.gitignore +19 -0
- data/.gitmodules +3 -0
- data/.travis.yml +25 -0
- data/CHANGELOG +395 -0
- data/Gemfile +30 -0
- data/Guardfile +8 -0
- data/LICENSE +23 -0
- data/Manifest +29 -0
- data/README.markdown +85 -0
- data/Rakefile +121 -0
- data/cause-abort.rb +117 -0
- data/ext/.gitignore +6 -0
- data/ext/Rakefile +41 -0
- data/ext/c_zookeeper.rb +398 -0
- data/ext/common.h +17 -0
- data/ext/dbg.h +53 -0
- data/ext/depend +5 -0
- data/ext/event_lib.c +740 -0
- data/ext/event_lib.h +175 -0
- data/ext/extconf.rb +103 -0
- data/ext/generate_gvl_code.rb +321 -0
- data/ext/patches/zkc-3.3.5-network.patch +24 -0
- data/ext/patches/zkc-3.4.5-fetch-and-add.patch +16 -0
- data/ext/patches/zkc-3.4.5-logging.patch +41 -0
- data/ext/patches/zkc-3.4.5-out-of-order-ping.patch +163 -0
- data/ext/patches/zkc-3.4.5-overflow.patch +11 -0
- data/ext/patches/zkc-3.4.5-yosemite-htonl-fix.patch +102 -0
- data/ext/zkc-3.4.5.tar.gz +0 -0
- data/ext/zkrb.c +1075 -0
- data/ext/zkrb_wrapper.c +775 -0
- data/ext/zkrb_wrapper.h +350 -0
- data/ext/zkrb_wrapper_compat.c +15 -0
- data/ext/zkrb_wrapper_compat.h +11 -0
- data/ext/zookeeper_base.rb +256 -0
- data/java/java_base.rb +503 -0
- data/lib/zookeeper.rb +115 -0
- data/lib/zookeeper/acls.rb +44 -0
- data/lib/zookeeper/callbacks.rb +108 -0
- data/lib/zookeeper/client.rb +30 -0
- data/lib/zookeeper/client_methods.rb +282 -0
- data/lib/zookeeper/common.rb +122 -0
- data/lib/zookeeper/common/queue_with_pipe.rb +110 -0
- data/lib/zookeeper/compatibility.rb +138 -0
- data/lib/zookeeper/constants.rb +97 -0
- data/lib/zookeeper/continuation.rb +223 -0
- data/lib/zookeeper/core_ext.rb +58 -0
- data/lib/zookeeper/em_client.rb +55 -0
- data/lib/zookeeper/exceptions.rb +135 -0
- data/lib/zookeeper/forked.rb +19 -0
- data/lib/zookeeper/latch.rb +34 -0
- data/lib/zookeeper/logger.rb +39 -0
- data/lib/zookeeper/logger/forwarding_logger.rb +84 -0
- data/lib/zookeeper/monitor.rb +19 -0
- data/lib/zookeeper/rake_tasks.rb +165 -0
- data/lib/zookeeper/request_registry.rb +153 -0
- data/lib/zookeeper/stat.rb +21 -0
- data/lib/zookeeper/version.rb +4 -0
- data/notes.txt +14 -0
- data/scripts/upgrade-1.0-sed-alike.rb +46 -0
- data/spec/c_zookeeper_spec.rb +51 -0
- data/spec/chrooted_connection_spec.rb +83 -0
- data/spec/compatibilty_spec.rb +8 -0
- data/spec/default_watcher_spec.rb +41 -0
- data/spec/em_spec.rb +51 -0
- data/spec/ext/zookeeper_base_spec.rb +19 -0
- data/spec/forked_connection_spec.rb +124 -0
- data/spec/latch_spec.rb +24 -0
- data/spec/log4j.properties +17 -0
- data/spec/shared/all_success_return_values.rb +10 -0
- data/spec/shared/connection_examples.rb +1077 -0
- data/spec/spec_helper.rb +61 -0
- data/spec/support/00_logging.rb +38 -0
- data/spec/support/10_spawn_zookeeper.rb +24 -0
- data/spec/support/progress_formatter.rb +15 -0
- data/spec/support/zookeeper_spec_helpers.rb +96 -0
- data/spec/zookeeper_spec.rb +24 -0
- data/zookeeper.gemspec +38 -0
- data/zoomonkey/duplicates +3 -0
- data/zoomonkey/zoomonkey.rb +194 -0
- metadata +157 -0
data/ext/depend ADDED
data/ext/event_lib.c ADDED
@@ -0,0 +1,740 @@
/* Ruby wrapper for the Zookeeper C API

   This file contains three sets of helpers:
     - the event queue that glues RB<->C together
     - the completions that marshall data between RB<->C formats
     - functions for translating between Ruby and C versions of ZK datatypes

   wickman@twitter.com


   NOTE: be *very careful* in these functions, calling *ANY* ruby interpreter
   function when you're not in an interpreter thread can hork ruby, trigger a
   [BUG], corrupt the stack, kill your dog, knock up your daughter, etc. etc.

   NOTE: the above is only true when you're running in THREADED mode, in
   single-threaded, everything is called on an interpreter thread.


   slyphon@gmail.com

*/

#include "ruby.h"
#include "zookeeper/zookeeper.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <inttypes.h>
#include "common.h"
#include "event_lib.h"
#include "dbg.h"

#ifndef THREADED
#define USE_XMALLOC
#endif

#define GET_SYM(str) ID2SYM(rb_intern(str))

int ZKRBDebugging;

#if THREADED
pthread_mutex_t zkrb_q_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

inline static int global_mutex_lock() {
  int rv=0;
#if THREADED
  rv = pthread_mutex_lock(&zkrb_q_mutex);
  if (rv != 0) log_err("global_mutex_lock error");
#endif
  return rv;
}

inline static int global_mutex_unlock() {
  int rv=0;
#if THREADED
  rv = pthread_mutex_unlock(&zkrb_q_mutex);
  if (rv != 0) log_err("global_mutex_unlock error");
#endif
  return rv;
}

// we can use the ruby xmalloc/xfree that will raise errors
// in the case of a failure to allocate memory, and can cycle
// the garbage collector in some cases.

inline static void* zk_malloc(size_t size) {
#ifdef USE_XMALLOC
  return xmalloc(size);
#else
  return malloc(size);
#endif
}

inline static void zk_free(void *ptr) {
#ifdef USE_XMALLOC
  xfree(ptr);
#else
  free(ptr);
#endif
}

void zkrb_enqueue(zkrb_queue_t *q, zkrb_event_t *elt) {
  if (q == NULL) {
    zkrb_debug("zkrb_enqueue, queue ptr was NULL");
    return;
  }

  if (q->tail == NULL) {
    zkrb_debug("zkrb_enqeue, q->tail was NULL");
    return;
  }

  global_mutex_lock();

  q->tail->event = elt;
  q->tail->next = (zkrb_event_ll_t *)zk_malloc(sizeof(zkrb_event_ll_t));
  q->tail = q->tail->next;
  q->tail->event = NULL;
  q->tail->next = NULL;

  global_mutex_unlock();

#if THREADED
  ssize_t ret = write(q->pipe_write, "0", 1); /* Wake up Ruby listener */

  if (ret < 0)
    log_err("write to queue (%p) pipe failed!\n", q);
#endif

}

// NOTE: the zkrb_event_t* returned *is* the same pointer that's part of the
// queue, the only place this is used is in method_has_events, and it is simply
// tested for null-ness. it's probably better to make the null-test here and
// not return the pointer
//
zkrb_event_t * zkrb_peek(zkrb_queue_t *q) {
  zkrb_event_t *event = NULL;

  if (!q) return NULL;

  global_mutex_lock();

  if (q != NULL && q->head != NULL && q->head->event != NULL) {
    event = q->head->event;
  }

  global_mutex_unlock();

  return event;
}

#define ZKRB_QUEUE_EMPTY(q) (q == NULL || q->head == NULL || q->head->event == NULL)

zkrb_event_t* zkrb_dequeue(zkrb_queue_t *q, int need_lock) {
  zkrb_event_t *rv = NULL;
  zkrb_event_ll_t *old_root = NULL;

  if (need_lock)
    global_mutex_lock();

  if (!ZKRB_QUEUE_EMPTY(q)) {
    old_root = q->head;
    q->head = q->head->next;
    rv = old_root->event;
  }

  if (need_lock)
    global_mutex_unlock();

  zk_free(old_root);
  return rv;
}

void zkrb_signal(zkrb_queue_t *q) {
  if (!q) return;

  global_mutex_lock();

#if THREADED
  if (!write(q->pipe_write, "0", 1)) /* Wake up Ruby listener */
    log_err("zkrb_signal: write to pipe failed, could not wake");
#endif

  global_mutex_unlock();
}

zkrb_event_ll_t *zkrb_event_ll_t_alloc(void) {
  zkrb_event_ll_t *rv = zk_malloc(sizeof(zkrb_event_ll_t));

  if (!rv) return NULL;

  rv->event = NULL;
  rv->next = NULL;

  return rv;
}

zkrb_queue_t *zkrb_queue_alloc(void) {
  zkrb_queue_t *rq = NULL;

#if THREADED
  int pfd[2];
  check(pipe(pfd) == 0, "creating the signal pipe failed");
#endif

  rq = zk_malloc(sizeof(zkrb_queue_t));
  check_mem(rq);

  rq->orig_pid = getpid();

  rq->head = zkrb_event_ll_t_alloc();
  check_mem(rq->head);

  rq->tail = rq->head;

#if THREADED
  rq->pipe_read = pfd[0];
  rq->pipe_write = pfd[1];
#endif

  return rq;

error:
  zk_free(rq);
  return NULL;
}

void zkrb_queue_free(zkrb_queue_t *queue) {
  if (!queue) return;

  zkrb_event_t *elt;
  while ((elt = zkrb_dequeue(queue, 0)) != NULL) {
    zkrb_event_free(elt);
  }

  zk_free(queue->head);

#if THREADED
  close(queue->pipe_read);
  close(queue->pipe_write);
#endif

  zk_free(queue);
}

zkrb_event_t *zkrb_event_alloc(void) {
  zkrb_event_t *rv = zk_malloc(sizeof(zkrb_event_t));
  return rv;
}

void zkrb_event_free(zkrb_event_t *event) {
  switch (event->type) {
    case ZKRB_DATA: {
      struct zkrb_data_completion *data_ctx = event->completion.data_completion;
      zk_free(data_ctx->data);
      zk_free(data_ctx->stat);
      zk_free(data_ctx);
      break;
    }
    case ZKRB_STAT: {
      struct zkrb_stat_completion *stat_ctx = event->completion.stat_completion;
      zk_free(stat_ctx->stat);
      zk_free(stat_ctx);
      break;
    }
    case ZKRB_STRING: {
      struct zkrb_string_completion *string_ctx = event->completion.string_completion;
      zk_free(string_ctx->value);
      zk_free(string_ctx);
      break;
    }
    case ZKRB_STRINGS: {
      struct zkrb_strings_completion *strings_ctx = event->completion.strings_completion;
      int k;
      if (strings_ctx->values) {
        for (k = 0; k < strings_ctx->values->count; ++k) {
          zk_free(strings_ctx->values->data[k]);
        }
        zk_free(strings_ctx->values);
      }
      zk_free(strings_ctx);
      break;
    }
    case ZKRB_STRINGS_STAT: {
      struct zkrb_strings_stat_completion *strings_stat_ctx = event->completion.strings_stat_completion;
      int k;
      if (strings_stat_ctx->values) {
        for (k = 0; k < strings_stat_ctx->values->count; ++k) {
          zk_free(strings_stat_ctx->values->data[k]);
        }
        zk_free(strings_stat_ctx->values);
      }

      if (strings_stat_ctx->stat) zk_free(strings_stat_ctx->stat);
      zk_free(strings_stat_ctx);
      break;
    }
    case ZKRB_ACL: {
      struct zkrb_acl_completion *acl_ctx = event->completion.acl_completion;
      if (acl_ctx->acl) {
        deallocate_ACL_vector(acl_ctx->acl);
        zk_free(acl_ctx->acl);
      }
      zk_free(acl_ctx->stat);
      zk_free(acl_ctx);
      break;
    }
    case ZKRB_WATCHER: {
      struct zkrb_watcher_completion *watcher_ctx = event->completion.watcher_completion;
      zk_free(watcher_ctx->path);
      zk_free(watcher_ctx);
      break;
    }
    case ZKRB_VOID: {
      break;
    }
    default:
      log_err("unrecognized event in event_free!");
  }

  zk_free(event);
}

/* this is called only from a method_get_latest_event, so the hash is
   allocated on the proper thread stack */
VALUE zkrb_event_to_ruby(zkrb_event_t *event) {
  VALUE hash = rb_hash_new();

  if (!event) {
    log_err("event was NULL in zkrb_event_to_ruby");
    return hash;
  }

  rb_hash_aset(hash, GET_SYM("req_id"), LL2NUM(event->req_id));
  if (event->type != ZKRB_WATCHER)
    rb_hash_aset(hash, GET_SYM("rc"), INT2FIX(event->rc));

  switch (event->type) {
    case ZKRB_DATA: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_DATA");
      struct zkrb_data_completion *data_ctx = event->completion.data_completion;
      if (ZKRBDebugging) zkrb_print_stat(data_ctx->stat);
      rb_hash_aset(hash, GET_SYM("data"), data_ctx->data ? rb_str_new(data_ctx->data, data_ctx->data_len) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), data_ctx->stat ? zkrb_stat_to_rarray(data_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_STAT: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_STAT");
      struct zkrb_stat_completion *stat_ctx = event->completion.stat_completion;
      rb_hash_aset(hash, GET_SYM("stat"), stat_ctx->stat ? zkrb_stat_to_rarray(stat_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_STRING: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_STRING");
      struct zkrb_string_completion *string_ctx = event->completion.string_completion;
      rb_hash_aset(hash, GET_SYM("string"), string_ctx->value ? rb_str_new2(string_ctx->value) : Qnil);
      break;
    }
    case ZKRB_STRINGS: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_STRINGS");
      struct zkrb_strings_completion *strings_ctx = event->completion.strings_completion;
      rb_hash_aset(hash, GET_SYM("strings"), strings_ctx->values ? zkrb_string_vector_to_ruby(strings_ctx->values) : Qnil);
      break;
    }
    case ZKRB_STRINGS_STAT: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_STRINGS_STAT");
      struct zkrb_strings_stat_completion *strings_stat_ctx = event->completion.strings_stat_completion;
      rb_hash_aset(hash, GET_SYM("strings"), strings_stat_ctx->values ? zkrb_string_vector_to_ruby(strings_stat_ctx->values) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), strings_stat_ctx->stat ? zkrb_stat_to_rarray(strings_stat_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_ACL: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_ACL");
      struct zkrb_acl_completion *acl_ctx = event->completion.acl_completion;
      rb_hash_aset(hash, GET_SYM("acl"), acl_ctx->acl ? zkrb_acl_vector_to_ruby(acl_ctx->acl) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), acl_ctx->stat ? zkrb_stat_to_rarray(acl_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_WATCHER: {
      zkrb_debug("zkrb_event_to_ruby ZKRB_WATCHER");
      struct zkrb_watcher_completion *watcher_ctx = event->completion.watcher_completion;
      rb_hash_aset(hash, GET_SYM("type"), INT2FIX(watcher_ctx->type));
      rb_hash_aset(hash, GET_SYM("state"), INT2FIX(watcher_ctx->state));
      rb_hash_aset(hash, GET_SYM("path"), watcher_ctx->path ? rb_str_new2(watcher_ctx->path) : Qnil);
      break;
    }
    case ZKRB_VOID:
    default:
      break;
  }

  return hash;
}

void zkrb_print_stat(const struct Stat *s) {
  if (s != NULL) {
    fprintf(stderr, "stat {\n");
    fprintf(stderr, "\t czxid: %"PRId64"\n", s->czxid); // PRId64 defined in inttypes.h
    fprintf(stderr, "\t mzxid: %"PRId64"\n", s->mzxid);
    fprintf(stderr, "\t ctime: %"PRId64"\n", s->ctime);
    fprintf(stderr, "\t mtime: %"PRId64"\n", s->mtime);
    fprintf(stderr, "\t version: %d\n", s->version);
    fprintf(stderr, "\t cversion: %d\n", s->cversion);
    fprintf(stderr, "\t aversion: %d\n", s->aversion);
    fprintf(stderr, "\t ephemeralOwner: %"PRId64"\n", s->ephemeralOwner);
    fprintf(stderr, "\t dataLength: %d\n", s->dataLength);
    fprintf(stderr, "\t numChildren: %d\n", s->numChildren);
    fprintf(stderr, "\t pzxid: %"PRId64"\n", s->pzxid);
    fprintf(stderr, "}\n");
  } else {
    fprintf(stderr, "stat { NULL }\n");
  }
}

zkrb_calling_context *zkrb_calling_context_alloc(int64_t req_id, zkrb_queue_t *queue) {
  zkrb_calling_context *ctx = zk_malloc(sizeof(zkrb_calling_context));
  if (!ctx) return NULL;

  ctx->req_id = req_id;
  ctx->queue = queue;

  return ctx;
}

void zkrb_calling_context_free(zkrb_calling_context *ctx) {
  zk_free(ctx);
}

void zkrb_print_calling_context(zkrb_calling_context *ctx) {
  fprintf(stderr, "calling context (%p){\n", ctx);
  fprintf(stderr, "\treq_id = %"PRId64"\n", ctx->req_id);
  fprintf(stderr, "\tqueue = %p\n", ctx->queue);
  fprintf(stderr, "}\n");
}

/*
  process completions that get queued to the watcher queue, translate events
  to completions that the ruby side dispatches via callbacks.

  The calling_ctx can be thought of as the outer shell that we discard in
  this macro after pulling out the gooey delicious center.
*/

#define ZKH_SETUP_EVENT(qptr, eptr) \
  zkrb_calling_context *ctx = (zkrb_calling_context *) calling_ctx; \
  zkrb_event_t *eptr = zkrb_event_alloc(); \
  eptr->req_id = ctx->req_id; \
  zkrb_queue_t *qptr = ctx->queue; \
  if (eptr->req_id != ZKRB_GLOBAL_REQ) zk_free(ctx)

void zkrb_state_callback(
    zhandle_t *zh, int type, int state, const char *path, void *calling_ctx) {

  zkrb_debug("ZOOKEEPER_C_STATE WATCHER "
             "type = %d, state = %d, path = %p, value = %s",
             type, state, (void *) path, path ? path : "NULL");

  /* save callback context */
  struct zkrb_watcher_completion *wc = zk_malloc(sizeof(struct zkrb_watcher_completion));
  wc->type = type;
  wc->state = state;
  wc->path = strdup(path);

  // This is unfortunate copy-pasta from ZKH_SETUP_EVENT with one change: we
  // check type instead of the req_id to see if we need to free the ctx.
  zkrb_calling_context *ctx = (zkrb_calling_context *) calling_ctx;
  zkrb_event_t *event = zkrb_event_alloc();
  event->req_id = ctx->req_id;
  zkrb_queue_t *queue = ctx->queue;
  if (type != ZOO_SESSION_EVENT) {
    zk_free(ctx);
    ctx = NULL;
  }

  event->type = ZKRB_WATCHER;
  event->completion.watcher_completion = wc;

  zkrb_enqueue(queue, event);
}

void zkrb_data_callback(
    int rc, const char *value, int value_len, const struct Stat *stat, const void *calling_ctx) {

  zkrb_debug("ZOOKEEPER_C_DATA WATCHER "
             "rc = %d (%s), value = %s, len = %d",
             rc, zerror(rc), value ? value : "NULL", value_len);

  /* copy data completion */
  struct zkrb_data_completion *dc = zk_malloc(sizeof(struct zkrb_data_completion));
  dc->data = NULL;
  dc->stat = NULL;
  dc->data_len = 0;

  if (value != NULL) {
    dc->data = zk_malloc(value_len); // xmalloc may raise an exception, which means the above completion will leak
    dc->data_len = value_len;
    memcpy(dc->data, value, value_len);
  }

  if (stat != NULL) { dc->stat = zk_malloc(sizeof(struct Stat)); memcpy(dc->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_DATA;
  event->completion.data_completion = dc;

  zkrb_enqueue(queue, event);
}

void zkrb_stat_callback(
    int rc, const struct Stat *stat, const void *calling_ctx) {
  zkrb_debug("ZOOKEEPER_C_STAT WATCHER "
             "rc = %d (%s)", rc, zerror(rc));

  struct zkrb_stat_completion *sc = zk_malloc(sizeof(struct zkrb_stat_completion));
  sc->stat = NULL;
  if (stat != NULL) { sc->stat = zk_malloc(sizeof(struct Stat)); memcpy(sc->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STAT;
  event->completion.stat_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_string_callback(
    int rc, const char *string, const void *calling_ctx) {

  zkrb_debug("ZOOKEEPER_C_STRING WATCHER "
             "rc = %d (%s)", rc, zerror(rc));

  struct zkrb_string_completion *sc = zk_malloc(sizeof(struct zkrb_string_completion));
  sc->value = NULL;
  if (string)
    sc->value = strdup(string);

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRING;
  event->completion.string_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_strings_callback(
    int rc, const struct String_vector *strings, const void *calling_ctx) {
  zkrb_debug("ZOOKEEPER_C_STRINGS WATCHER "
             "rc = %d (%s), calling_ctx = %p", rc, zerror(rc), calling_ctx);

  /* copy string vector */
  struct zkrb_strings_completion *sc = zk_malloc(sizeof(struct zkrb_strings_completion));
  sc->values = (strings != NULL) ? zkrb_clone_string_vector(strings) : NULL;

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRINGS;
  event->completion.strings_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_strings_stat_callback(
    int rc, const struct String_vector *strings, const struct Stat *stat, const void *calling_ctx) {
  zkrb_debug("ZOOKEEPER_C_STRINGS_STAT WATCHER "
             "rc = %d (%s), calling_ctx = %p", rc, zerror(rc), calling_ctx);

  struct zkrb_strings_stat_completion *sc = zk_malloc(sizeof(struct zkrb_strings_stat_completion));
  sc->stat = NULL;
  if (stat != NULL) { sc->stat = zk_malloc(sizeof(struct Stat)); memcpy(sc->stat, stat, sizeof(struct Stat)); }

  sc->values = (strings != NULL) ? zkrb_clone_string_vector(strings) : NULL;

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRINGS_STAT;
  event->completion.strings_stat_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_void_callback(int rc, const void *calling_ctx) {
  zkrb_debug("ZOOKEEPER_C_VOID WATCHER "
             "rc = %d (%s)", rc, zerror(rc));

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_VOID;
  event->completion.void_completion = NULL;

  zkrb_enqueue(queue, event);
}

void zkrb_acl_callback(
    int rc, struct ACL_vector *acls, struct Stat *stat, const void *calling_ctx) {
  zkrb_debug("ZOOKEEPER_C_ACL WATCHER rc = %d (%s)", rc, zerror(rc));

  struct zkrb_acl_completion *ac = zk_malloc(sizeof(struct zkrb_acl_completion));
  ac->acl = NULL;
  ac->stat = NULL;
  if (acls != NULL) { ac->acl = zkrb_clone_acl_vector(acls); }
  if (stat != NULL) { ac->stat = zk_malloc(sizeof(struct Stat)); memcpy(ac->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_ACL;
  event->completion.acl_completion = ac;

  /* should be synchronized */
  zkrb_enqueue(queue, event);
}

VALUE zkrb_id_to_ruby(struct Id *id) {
  VALUE hash = rb_hash_new();
  rb_hash_aset(hash, GET_SYM("scheme"), rb_str_new2(id->scheme));
  rb_hash_aset(hash, GET_SYM("id"), rb_str_new2(id->id));
  return hash;
}

VALUE zkrb_acl_to_ruby(struct ACL *acl) {
  VALUE hash = rb_hash_new();
  rb_hash_aset(hash, GET_SYM("perms"), INT2NUM(acl->perms));
  rb_hash_aset(hash, GET_SYM("id"), zkrb_id_to_ruby(&(acl->id)));
  return hash;
}

// [wickman] TODO test zkrb_ruby_to_aclvector
// [slyphon] TODO size checking on acl_ary (cast to int)
struct ACL_vector * zkrb_ruby_to_aclvector(VALUE acl_ary) {
  Check_Type(acl_ary, T_ARRAY);

  struct ACL_vector *v = zk_malloc(sizeof(struct ACL_vector));
  allocate_ACL_vector(v, (int)RARRAY_LEN(acl_ary));

  int k;
  for (k = 0; k < v->count; ++k) {
    VALUE acl_val = rb_ary_entry(acl_ary, k);
    v->data[k] = zkrb_ruby_to_acl(acl_val);
  }

  return v;
}

// [wickman] TODO test zkrb_ruby_to_aclvector
struct ACL zkrb_ruby_to_acl(VALUE rubyacl) {
  struct ACL acl;

  VALUE perms = rb_iv_get(rubyacl, "@perms");
  VALUE rubyid = rb_iv_get(rubyacl, "@id");
  acl.perms = NUM2INT(perms);
  acl.id = zkrb_ruby_to_id(rubyid);

  return acl;
}

// [wickman] TODO zkrb_ruby_to_id error checking? test
struct Id zkrb_ruby_to_id(VALUE rubyid) {
  struct Id id;

  VALUE scheme = rb_iv_get(rubyid, "@scheme");
  VALUE ident = rb_iv_get(rubyid, "@id");

  if (scheme != Qnil) {
    id.scheme = zk_malloc(RSTRING_LEN(scheme) + 1);
    strncpy(id.scheme, RSTRING_PTR(scheme), RSTRING_LEN(scheme));
    id.scheme[RSTRING_LEN(scheme)] = '\0';
  } else {
    id.scheme = NULL;
  }

  if (ident != Qnil) {
    id.id = zk_malloc(RSTRING_LEN(ident) + 1);
    strncpy(id.id, RSTRING_PTR(ident), RSTRING_LEN(ident));
    id.id[RSTRING_LEN(ident)] = '\0';
  } else {
    id.id = NULL;
  }

  return id;
}

VALUE zkrb_acl_vector_to_ruby(struct ACL_vector *acl_vector) {
  int i;
  VALUE ary = rb_ary_new2(acl_vector->count);
  for(i = 0; i < acl_vector->count; i++) {
    rb_ary_push(ary, zkrb_acl_to_ruby(acl_vector->data+i));
  }
  return ary;
}

VALUE zkrb_string_vector_to_ruby(struct String_vector *string_vector) {
  int i;
  VALUE ary = rb_ary_new2(string_vector->count);
  for(i = 0; i < string_vector->count; i++) {
    rb_ary_push(ary, rb_str_new2(string_vector->data[i]));
  }
  return ary;
}

VALUE zkrb_stat_to_rarray(const struct Stat* stat) {
  return rb_ary_new3(11,
    LL2NUM(stat->czxid),
    LL2NUM(stat->mzxid),
    LL2NUM(stat->ctime),
    LL2NUM(stat->mtime),
    INT2NUM(stat->version),
    INT2NUM(stat->cversion),
    INT2NUM(stat->aversion),
    LL2NUM(stat->ephemeralOwner),
    INT2NUM(stat->dataLength),
    INT2NUM(stat->numChildren),
    LL2NUM(stat->pzxid));
}

VALUE zkrb_stat_to_rhash(const struct Stat *stat) {
  VALUE ary = rb_hash_new();
  rb_hash_aset(ary, GET_SYM("czxid"), LL2NUM(stat->czxid));
  rb_hash_aset(ary, GET_SYM("mzxid"), LL2NUM(stat->mzxid));
  rb_hash_aset(ary, GET_SYM("ctime"), LL2NUM(stat->ctime));
  rb_hash_aset(ary, GET_SYM("mtime"), LL2NUM(stat->mtime));
  rb_hash_aset(ary, GET_SYM("version"), INT2NUM(stat->version));
  rb_hash_aset(ary, GET_SYM("cversion"), INT2NUM(stat->cversion));
  rb_hash_aset(ary, GET_SYM("aversion"), INT2NUM(stat->aversion));
  rb_hash_aset(ary, GET_SYM("ephemeralOwner"), LL2NUM(stat->ephemeralOwner));
  rb_hash_aset(ary, GET_SYM("dataLength"), INT2NUM(stat->dataLength));
  rb_hash_aset(ary, GET_SYM("numChildren"), INT2NUM(stat->numChildren));
  rb_hash_aset(ary, GET_SYM("pzxid"), LL2NUM(stat->pzxid));
  return ary;
}

// [wickman] TODO test zkrb_clone_acl_vector
struct ACL_vector * zkrb_clone_acl_vector(struct ACL_vector * src) {
  struct ACL_vector * dst = zk_malloc(sizeof(struct ACL_vector));
  allocate_ACL_vector(dst, src->count);
  int k;
  for (k = 0; k < src->count; ++k) {
    struct ACL * elt = &src->data[k];
    dst->data[k].id.scheme = strdup(elt->id.scheme);
    dst->data[k].id.id = strdup(elt->id.id);
    dst->data[k].perms = elt->perms;
  }
  return dst;
}

// [wickman] TODO test zkrb_clone_string_vector
struct String_vector * zkrb_clone_string_vector(const struct String_vector * src) {
  struct String_vector * dst = zk_malloc(sizeof(struct String_vector));
  allocate_String_vector(dst, src->count);
  int k;
  for (k = 0; k < src->count; ++k) {
    dst->data[k] = strdup(src->data[k]);
  }
  return dst;
}

// vim:sts=2:sw=2:et
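
For orientation on how the pieces in event_lib.c fit together: an async completion is registered with a zkrb_calling_context telling it which queue to enqueue into, zkrb_enqueue writes a wakeup byte to the queue's pipe, and the Ruby-facing side later drains the queue on an interpreter thread (per the NOTE in the file header). The sketch below is illustrative only and is not code from the gem; it assumes a THREADED build (pipe_read only exists then), an already-connected zhandle_t, that event_lib.h exposes the types and prototypes used in the file above, and the standard ZooKeeper C client call zoo_aget().

/* consumer_sketch.c -- hypothetical illustration, not part of zookeeper-ng */
#include <sys/select.h>
#include <unistd.h>
#include "ruby.h"
#include "event_lib.h"
#include "zookeeper/zookeeper.h"

static void issue_and_drain(zhandle_t *zh, zkrb_queue_t *q) {
  /* Wrap a request id and the queue so the completion knows where to enqueue. */
  zkrb_calling_context *ctx = zkrb_calling_context_alloc(42 /* req_id, assumed */, q);

  /* zkrb_data_callback has the C client's data_completion_t shape, so it can
   * be handed directly to an async call such as zoo_aget(). */
  zoo_aget(zh, "/some/znode", 0, zkrb_data_callback, ctx);

  /* Block until zkrb_enqueue writes its wakeup byte to the pipe, then drain it. */
  fd_set rfds;
  FD_ZERO(&rfds);
  FD_SET(q->pipe_read, &rfds);
  select(q->pipe_read + 1, &rfds, NULL, NULL, NULL);
  char b;
  read(q->pipe_read, &b, 1);

  /* Drain ready events; converting them to Ruby values must happen on an
   * interpreter thread, as the file header warns. */
  while (zkrb_peek(q) != NULL) {
    zkrb_event_t *event = zkrb_dequeue(q, 1);
    VALUE hash = zkrb_event_to_ruby(event); /* :req_id, :rc, plus type-specific keys */
    (void)hash;                             /* hand off to the Ruby layer */
    zkrb_event_free(event);
  }
}

Note the split of responsibilities in the real code: the pipe carries only a one-byte wakeup signal, while the events themselves travel through the linked-list queue guarded by zkrb_q_mutex, so a slow reader never loses data even if wakeup bytes coalesce.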