slyphon-zookeeper 0.1.0

/* Ruby wrapper for the Zookeeper C API

   This file contains three sets of helpers:
     - the event queue that glues RB<->C together
     - the completions that marshall data between RB<->C formats
     - functions for translating between Ruby and C versions of ZK datatypes

   wickman@twitter.com
*/

#include "ruby.h"
#include "zookeeper_lib.h"
#include "c-client-src/zookeeper.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* memcpy, strdup, strncpy */
#include <pthread.h>
#include <unistd.h>

#define GET_SYM(str) ID2SYM(rb_intern(str))

int ZKRBDebugging;

pthread_mutex_t zkrb_q_mutex = PTHREAD_MUTEX_INITIALIZER;

/* push/pop is a misnomer, this is a queue */
void zkrb_enqueue(zkrb_queue_t *q, zkrb_event_t *elt) {
  pthread_mutex_lock(&zkrb_q_mutex);
  if (q == NULL || q->tail == NULL) {
    pthread_mutex_unlock(&zkrb_q_mutex);
    return;
  }
  q->tail->event = elt;
  q->tail->next = (struct zkrb_event_ll_t *) malloc(sizeof(struct zkrb_event_ll_t));
  q->tail = q->tail->next;
  q->tail->event = NULL;
  q->tail->next = NULL;
  ssize_t ret = write(q->pipe_write, "0", 1); /* Wake up Ruby listener */
  pthread_mutex_unlock(&zkrb_q_mutex);
  if (ret == -1)
    rb_raise(rb_eRuntimeError, "write to pipe failed: %d", errno);
}

zkrb_event_t * zkrb_peek(zkrb_queue_t *q) {
  pthread_mutex_lock(&zkrb_q_mutex);
  zkrb_event_t *event = NULL;
  if (q != NULL && q->head != NULL && q->head->event != NULL) {
    event = q->head->event;
  }
  pthread_mutex_unlock(&zkrb_q_mutex);
  return event;
}

zkrb_event_t* zkrb_dequeue(zkrb_queue_t *q, int need_lock) {
  if (need_lock)
    pthread_mutex_lock(&zkrb_q_mutex);
  if (q == NULL || q->head == NULL || q->head->event == NULL) {
    if (need_lock)
      pthread_mutex_unlock(&zkrb_q_mutex);
    return NULL;
  } else {
    struct zkrb_event_ll_t *old_root = q->head;
    q->head = q->head->next;
    zkrb_event_t *rv = old_root->event;
    free(old_root);
    if (need_lock)
      pthread_mutex_unlock(&zkrb_q_mutex);
    return rv;
  }
}

zkrb_queue_t *zkrb_queue_alloc(void) {
  int pfd[2];
  if (pipe(pfd) == -1)
    rb_raise(rb_eRuntimeError, "create of pipe failed: %d", errno);

  pthread_mutex_lock(&zkrb_q_mutex);
  zkrb_queue_t *rq = malloc(sizeof(zkrb_queue_t));
  rq->head = malloc(sizeof(struct zkrb_event_ll_t));
  rq->head->event = NULL; rq->head->next = NULL;
  rq->tail = rq->head;
  rq->pipe_read = pfd[0];
  rq->pipe_write = pfd[1];
  pthread_mutex_unlock(&zkrb_q_mutex);

  return rq;
}

void zkrb_queue_free(zkrb_queue_t *queue) {
  pthread_mutex_lock(&zkrb_q_mutex);
  if (queue == NULL) {
    pthread_mutex_unlock(&zkrb_q_mutex);
    return;
  }

  zkrb_event_t *elt;
  while ((elt = zkrb_dequeue(queue, 0)) != NULL) {
    zkrb_event_free(elt);
  }
  free(queue->head);
  close(queue->pipe_read);
  close(queue->pipe_write);
  free(queue);
  pthread_mutex_unlock(&zkrb_q_mutex);
}
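
/* A minimal usage sketch (not part of the original file): one way a consumer
   could use the queue above.  Block on the wakeup pipe, then drain pending
   events.  The function name zkrb_drain_example is hypothetical. */
static void zkrb_drain_example(zkrb_queue_t *q) {
  char sig;
  /* zkrb_enqueue() writes one byte per event, so a blocking read acts as a wakeup */
  if (read(q->pipe_read, &sig, 1) == -1) return;

  zkrb_event_t *event;
  while ((event = zkrb_dequeue(q, 1)) != NULL) {
    /* a real consumer would first convert the event with zkrb_event_to_ruby() */
    zkrb_event_free(event);
  }
}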

zkrb_event_t *zkrb_event_alloc(void) {
  zkrb_event_t *rv = (zkrb_event_t *) malloc(sizeof(zkrb_event_t));
  return rv;
}

void zkrb_event_free(zkrb_event_t *event) {
  switch (event->type) {
    case ZKRB_DATA: {
      struct zkrb_data_completion *data_ctx = event->completion.data_completion;
      free(data_ctx->data);
      free(data_ctx->stat);
      free(data_ctx);
      break;
    }
    case ZKRB_STAT: {
      struct zkrb_stat_completion *stat_ctx = event->completion.stat_completion;
      free(stat_ctx->stat);
      free(stat_ctx);
      break;
    }
    case ZKRB_STRING: {
      struct zkrb_string_completion *string_ctx = event->completion.string_completion;
      free(string_ctx->value);
      free(string_ctx);
      break;
    }
    case ZKRB_STRINGS: {
      struct zkrb_strings_completion *strings_ctx = event->completion.strings_completion;
      /* values may be NULL if the callback saw no string vector */
      if (strings_ctx->values != NULL) {
        int k;
        for (k = 0; k < strings_ctx->values->count; ++k) free(strings_ctx->values->data[k]);
        free(strings_ctx->values->data);
        free(strings_ctx->values);
      }
      free(strings_ctx);
      break;
    }
    case ZKRB_STRINGS_STAT: {
      struct zkrb_strings_stat_completion *strings_stat_ctx = event->completion.strings_stat_completion;
      if (strings_stat_ctx->values != NULL) {
        int k;
        for (k = 0; k < strings_stat_ctx->values->count; ++k) free(strings_stat_ctx->values->data[k]);
        free(strings_stat_ctx->values->data);
        free(strings_stat_ctx->values);
      }
      free(strings_stat_ctx->stat);
      free(strings_stat_ctx);
      break;
    }
    case ZKRB_ACL: {
      struct zkrb_acl_completion *acl_ctx = event->completion.acl_completion;
      if (acl_ctx->acl) {
        deallocate_ACL_vector(acl_ctx->acl);
        free(acl_ctx->acl);
      }
      free(acl_ctx->stat);
      free(acl_ctx);
      break;
    }
    case ZKRB_WATCHER: {
      struct zkrb_watcher_completion *watcher_ctx = event->completion.watcher_completion;
      free(watcher_ctx->path);
      free(watcher_ctx);
      break;
    }
    case ZKRB_VOID: {
      break;
    }

    default:
#warning [wickman] TODO raise an exception?
      fprintf(stderr, "ERROR?\n");
  }
  free(event);
}

/* this is called only from a method_get_latest_event, so the hash is
   allocated on the proper thread stack */
VALUE zkrb_event_to_ruby(zkrb_event_t *event) {
  VALUE hash = rb_hash_new();

  rb_hash_aset(hash, GET_SYM("req_id"), LL2NUM(event->req_id));
  if (event->type != ZKRB_WATCHER)
    rb_hash_aset(hash, GET_SYM("rc"), INT2FIX(event->rc));

  switch (event->type) {
    case ZKRB_DATA: {
      struct zkrb_data_completion *data_ctx = event->completion.data_completion;
      if (ZKRBDebugging) zkrb_print_stat(data_ctx->stat);
      rb_hash_aset(hash, GET_SYM("data"), data_ctx->data ? rb_str_new(data_ctx->data, data_ctx->data_len) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), data_ctx->stat ? zkrb_stat_to_rarray(data_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_STAT: {
      struct zkrb_stat_completion *stat_ctx = event->completion.stat_completion;
      rb_hash_aset(hash, GET_SYM("stat"), stat_ctx->stat ? zkrb_stat_to_rarray(stat_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_STRING: {
      struct zkrb_string_completion *string_ctx = event->completion.string_completion;
      rb_hash_aset(hash, GET_SYM("string"), string_ctx->value ? rb_str_new2(string_ctx->value) : Qnil);
      break;
    }
    case ZKRB_STRINGS: {
      struct zkrb_strings_completion *strings_ctx = event->completion.strings_completion;
      rb_hash_aset(hash, GET_SYM("strings"), strings_ctx->values ? zkrb_string_vector_to_ruby(strings_ctx->values) : Qnil);
      break;
    }
    case ZKRB_STRINGS_STAT: {
      struct zkrb_strings_stat_completion *strings_stat_ctx = event->completion.strings_stat_completion;
      rb_hash_aset(hash, GET_SYM("strings"), strings_stat_ctx->values ? zkrb_string_vector_to_ruby(strings_stat_ctx->values) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), strings_stat_ctx->stat ? zkrb_stat_to_rarray(strings_stat_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_ACL: {
      struct zkrb_acl_completion *acl_ctx = event->completion.acl_completion;
      rb_hash_aset(hash, GET_SYM("acl"), acl_ctx->acl ? zkrb_acl_vector_to_ruby(acl_ctx->acl) : Qnil);
      rb_hash_aset(hash, GET_SYM("stat"), acl_ctx->stat ? zkrb_stat_to_rarray(acl_ctx->stat) : Qnil);
      break;
    }
    case ZKRB_WATCHER: {
      struct zkrb_watcher_completion *watcher_ctx = event->completion.watcher_completion;
      rb_hash_aset(hash, GET_SYM("type"), INT2FIX(watcher_ctx->type));
      rb_hash_aset(hash, GET_SYM("state"), INT2FIX(watcher_ctx->state));
      rb_hash_aset(hash, GET_SYM("path"), watcher_ctx->path ? rb_str_new2(watcher_ctx->path) : Qnil);
      break;
    }
    case ZKRB_VOID:
    default:
      break;
  }

  return hash;
}
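
/* Illustration only (values made up): a completed data request surfaces to Ruby
   roughly as
     { :req_id => 42, :rc => 0, :data => "payload", :stat => [czxid, mzxid, ...] }
   while a watcher event carries :req_id, :type, :state and :path instead of :rc. */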

void zkrb_print_stat(const struct Stat *s) {
  fprintf(stderr, "stat {\n");
  if (s != NULL) {
    fprintf(stderr, "\t czxid: %lld\n", s->czxid);
    fprintf(stderr, "\t mzxid: %lld\n", s->mzxid);
    fprintf(stderr, "\t ctime: %lld\n", s->ctime);
    fprintf(stderr, "\t mtime: %lld\n", s->mtime);
    fprintf(stderr, "\t version: %d\n", s->version);
    fprintf(stderr, "\t cversion: %d\n", s->cversion);
    fprintf(stderr, "\t aversion: %d\n", s->aversion);
    fprintf(stderr, "\t ephemeralOwner: %lld\n", s->ephemeralOwner);
    fprintf(stderr, "\t dataLength: %d\n", s->dataLength);
    fprintf(stderr, "\t numChildren: %d\n", s->numChildren);
    fprintf(stderr, "\t pzxid: %lld\n", s->pzxid);
  } else {
    fprintf(stderr, "\tNULL\n");
  }
  fprintf(stderr, "}\n");
}

zkrb_calling_context *zkrb_calling_context_alloc(int64_t req_id, zkrb_queue_t *queue) {
  zkrb_calling_context *ctx = malloc(sizeof(zkrb_calling_context));
  ctx->req_id = req_id;
  ctx->queue = queue;
  return ctx;
}

void zkrb_print_calling_context(zkrb_calling_context *ctx) {
  fprintf(stderr, "calling context (%p){\n", ctx);
  fprintf(stderr, "\treq_id = %lld\n", ctx->req_id);
  fprintf(stderr, "\tqueue = %p\n", ctx->queue);
  fprintf(stderr, "}\n");
}

/*
  process completions that get queued to the watcher queue, translate events
  to completions that the ruby side dispatches via callbacks.

  The calling_ctx can be thought of as the outer shell that we discard in
  this macro after pulling out the gooey delicious center.
*/

#define ZKH_SETUP_EVENT(qptr, eptr) \
  zkrb_calling_context *ctx = (zkrb_calling_context *) calling_ctx; \
  zkrb_event_t *eptr = zkrb_event_alloc(); \
  eptr->req_id = ctx->req_id; \
  zkrb_queue_t *qptr = ctx->queue; \
  if (eptr->req_id != ZKRB_GLOBAL_REQ) free(ctx)

void zkrb_state_callback(
    zhandle_t *zh, int type, int state, const char *path, void *calling_ctx) {
  /* logging */
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_STATE WATCHER "
            "type = %d, state = %d, path = %p, value = %s\n",
            type, state, (void *) path, path ? path : "NULL");
  }

  /* save callback context */
  struct zkrb_watcher_completion *wc = malloc(sizeof(struct zkrb_watcher_completion));
  wc->type = type;
  wc->state = state;
  wc->path = path ? strdup(path) : NULL;  /* guard against a NULL path before strdup */

  // This is unfortunate copy-pasta from ZKH_SETUP_EVENT with one change: we
  // check type instead of the req_id to see if we need to free the ctx.
  zkrb_calling_context *ctx = (zkrb_calling_context *) calling_ctx;
  zkrb_event_t *event = zkrb_event_alloc();
  event->req_id = ctx->req_id;
  zkrb_queue_t *queue = ctx->queue;
  if (type != ZOO_SESSION_EVENT) {
    free(ctx);
    ctx = NULL;
  }

  event->type = ZKRB_WATCHER;
  event->completion.watcher_completion = wc;

  zkrb_enqueue(queue, event);
}
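
/* Minimal sketch (an assumption, not code from this file): zkrb_state_callback
   is the kind of global watcher a caller would hand to zookeeper_init(), using
   a context whose req_id is ZKRB_GLOBAL_REQ so it is never freed per-request.
   The function name and the 10000 ms timeout are illustrative only. */
static zhandle_t *zkrb_connect_example(const char *hosts, zkrb_queue_t *queue) {
  zkrb_calling_context *ctx = zkrb_calling_context_alloc(ZKRB_GLOBAL_REQ, queue);
  return zookeeper_init(hosts, zkrb_state_callback, 10000, NULL, ctx, 0);
}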

void zkrb_data_callback(
    int rc, const char *value, int value_len, const struct Stat *stat, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_DATA WATCHER "
            "rc = %d (%s), value = %s, len = %d\n",
            rc, zerror(rc), value ? value : "NULL", value_len);
  }

  /* copy data completion */
  struct zkrb_data_completion *dc = malloc(sizeof(struct zkrb_data_completion));
  dc->data = NULL;
  dc->stat = NULL;
  dc->data_len = 0;

  if (value != NULL) {
    dc->data = malloc(value_len);
    dc->data_len = value_len;
    memcpy(dc->data, value, value_len);
  }

  if (stat != NULL) { dc->stat = malloc(sizeof(struct Stat)); memcpy(dc->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_DATA;
  event->completion.data_completion = dc;

  zkrb_enqueue(queue, event);
}
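
/* Minimal sketch (an assumption, not the gem's actual entry point): how a
   caller could wire zkrb_data_callback into an asynchronous get via the
   libzookeeper zoo_aget() call.  The function name and parameters are
   illustrative only. */
static int zkrb_aget_example(zhandle_t *zh, const char *path, int watch,
                             int64_t req_id, zkrb_queue_t *queue) {
  zkrb_calling_context *ctx = zkrb_calling_context_alloc(req_id, queue);
  /* on completion, zkrb_data_callback packages the reply into a ZKRB_DATA
     event and enqueues it for the Ruby side */
  return zoo_aget(zh, path, watch, zkrb_data_callback, ctx);
}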

void zkrb_stat_callback(
    int rc, const struct Stat *stat, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_STAT WATCHER "
            "rc = %d (%s)\n", rc, zerror(rc));
  }

  struct zkrb_stat_completion *sc = malloc(sizeof(struct zkrb_stat_completion));
  sc->stat = NULL;
  if (stat != NULL) { sc->stat = malloc(sizeof(struct Stat)); memcpy(sc->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STAT;
  event->completion.stat_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_string_callback(
    int rc, const char *string, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_STRING WATCHER "
            "rc = %d (%s)\n", rc, zerror(rc));
  }

  struct zkrb_string_completion *sc = malloc(sizeof(struct zkrb_string_completion));
  sc->value = NULL;
  if (string)
    sc->value = strdup(string);

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRING;
  event->completion.string_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_strings_callback(
    int rc, const struct String_vector *strings, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_STRINGS WATCHER "
            "rc = %d (%s), calling_ctx = %p\n", rc, zerror(rc), calling_ctx);
  }

  /* copy string vector */
  struct zkrb_strings_completion *sc = malloc(sizeof(struct zkrb_strings_completion));
  sc->values = (strings != NULL) ? zkrb_clone_string_vector(strings) : NULL;

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRINGS;
  event->completion.strings_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_strings_stat_callback(
    int rc, const struct String_vector *strings, const struct Stat *stat, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_STRINGS_STAT WATCHER "
            "rc = %d (%s), calling_ctx = %p\n", rc, zerror(rc), calling_ctx);
  }

  struct zkrb_strings_stat_completion *sc = malloc(sizeof(struct zkrb_strings_stat_completion));
  sc->stat = NULL;
  if (stat != NULL) { sc->stat = malloc(sizeof(struct Stat)); memcpy(sc->stat, stat, sizeof(struct Stat)); }
  sc->values = (strings != NULL) ? zkrb_clone_string_vector(strings) : NULL;

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_STRINGS_STAT;
  event->completion.strings_stat_completion = sc;

  zkrb_enqueue(queue, event);
}

void zkrb_void_callback(
    int rc, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_VOID WATCHER "
            "rc = %d (%s)\n", rc, zerror(rc));
  }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_VOID;
  event->completion.void_completion = NULL;

  zkrb_enqueue(queue, event);
}

void zkrb_acl_callback(
    int rc, struct ACL_vector *acls, struct Stat *stat, const void *calling_ctx) {
  if (ZKRBDebugging) {
    fprintf(stderr, "ZOOKEEPER_C_ACL WATCHER "
            "rc = %d (%s)\n", rc, zerror(rc));
  }

  struct zkrb_acl_completion *ac = malloc(sizeof(struct zkrb_acl_completion));
  ac->acl = NULL;
  ac->stat = NULL;
  if (acls != NULL) { ac->acl = zkrb_clone_acl_vector(acls); }
  if (stat != NULL) { ac->stat = malloc(sizeof(struct Stat)); memcpy(ac->stat, stat, sizeof(struct Stat)); }

  ZKH_SETUP_EVENT(queue, event);
  event->rc = rc;
  event->type = ZKRB_ACL;
  event->completion.acl_completion = ac;

  /* should be synchronized */
  zkrb_enqueue(queue, event);
}

VALUE zkrb_id_to_ruby(struct Id *id) {
  VALUE hash = rb_hash_new();
  rb_hash_aset(hash, GET_SYM("scheme"), rb_str_new2(id->scheme));
  rb_hash_aset(hash, GET_SYM("id"), rb_str_new2(id->id));
  return hash;
}

VALUE zkrb_acl_to_ruby(struct ACL *acl) {
  VALUE hash = rb_hash_new();
  rb_hash_aset(hash, GET_SYM("perms"), INT2NUM(acl->perms));
  rb_hash_aset(hash, GET_SYM("id"), zkrb_id_to_ruby(&(acl->id)));
  return hash;
}

#warning [wickman] TODO test zkrb_ruby_to_aclvector
struct ACL_vector * zkrb_ruby_to_aclvector(VALUE acl_ary) {
  Check_Type(acl_ary, T_ARRAY);

  struct ACL_vector *v = malloc(sizeof(struct ACL_vector));
  allocate_ACL_vector(v, RARRAY_LEN(acl_ary));

  int k;
  for (k = 0; k < v->count; ++k) {
    VALUE acl_val = rb_ary_entry(acl_ary, k);
    v->data[k] = zkrb_ruby_to_acl(acl_val);
  }

  return v;
}

#warning [wickman] TODO test zkrb_ruby_to_acl
struct ACL zkrb_ruby_to_acl(VALUE rubyacl) {
  struct ACL acl;

  VALUE perms = rb_iv_get(rubyacl, "@perms");
  VALUE rubyid = rb_iv_get(rubyacl, "@id");
  acl.perms = NUM2INT(perms);
  acl.id = zkrb_ruby_to_id(rubyid);

  return acl;
}

#warning [wickman] TODO zkrb_ruby_to_id error checking? test
struct Id zkrb_ruby_to_id(VALUE rubyid) {
  struct Id id;

  VALUE scheme = rb_iv_get(rubyid, "@scheme");
  VALUE ident = rb_iv_get(rubyid, "@id");

  if (scheme != Qnil) {
    id.scheme = malloc(RSTRING_LEN(scheme) + 1);
    strncpy(id.scheme, RSTRING_PTR(scheme), RSTRING_LEN(scheme));
    id.scheme[RSTRING_LEN(scheme)] = '\0';
  } else {
    id.scheme = NULL;
  }

  if (ident != Qnil) {
    id.id = malloc(RSTRING_LEN(ident) + 1);
    strncpy(id.id, RSTRING_PTR(ident), RSTRING_LEN(ident));
    id.id[RSTRING_LEN(ident)] = '\0';
  } else {
    id.id = NULL;
  }

  return id;
}
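
/* Illustration (an assumption about the Ruby side): the converters above only
   read instance variables, so any objects exposing @perms, @id and @scheme
   work, e.g. an ACL whose @id holds an Id with @scheme = "world" and
   @id = "anyone". */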

VALUE zkrb_acl_vector_to_ruby(struct ACL_vector *acl_vector) {
  int i;
  VALUE ary = rb_ary_new2(acl_vector->count);
  for (i = 0; i < acl_vector->count; i++) {
    rb_ary_push(ary, zkrb_acl_to_ruby(acl_vector->data + i));
  }
  return ary;
}

VALUE zkrb_string_vector_to_ruby(struct String_vector *string_vector) {
  int i;
  VALUE ary = rb_ary_new2(string_vector->count);
  for (i = 0; i < string_vector->count; i++) {
    rb_ary_push(ary, rb_str_new2(string_vector->data[i]));
  }
  return ary;
}

VALUE zkrb_stat_to_rarray(const struct Stat *stat) {
  return rb_ary_new3(11,
    LL2NUM(stat->czxid),
    LL2NUM(stat->mzxid),
    LL2NUM(stat->ctime),
    LL2NUM(stat->mtime),
    INT2NUM(stat->version),
    INT2NUM(stat->cversion),
    INT2NUM(stat->aversion),
    LL2NUM(stat->ephemeralOwner),
    INT2NUM(stat->dataLength),
    INT2NUM(stat->numChildren),
    LL2NUM(stat->pzxid));
}

VALUE zkrb_stat_to_rhash(const struct Stat *stat) {
  VALUE ary = rb_hash_new();
  rb_hash_aset(ary, GET_SYM("czxid"), LL2NUM(stat->czxid));
  rb_hash_aset(ary, GET_SYM("mzxid"), LL2NUM(stat->mzxid));
  rb_hash_aset(ary, GET_SYM("ctime"), LL2NUM(stat->ctime));
  rb_hash_aset(ary, GET_SYM("mtime"), LL2NUM(stat->mtime));
  rb_hash_aset(ary, GET_SYM("version"), INT2NUM(stat->version));
  rb_hash_aset(ary, GET_SYM("cversion"), INT2NUM(stat->cversion));
  rb_hash_aset(ary, GET_SYM("aversion"), INT2NUM(stat->aversion));
  rb_hash_aset(ary, GET_SYM("ephemeralOwner"), LL2NUM(stat->ephemeralOwner));
  rb_hash_aset(ary, GET_SYM("dataLength"), INT2NUM(stat->dataLength));
  rb_hash_aset(ary, GET_SYM("numChildren"), INT2NUM(stat->numChildren));
  rb_hash_aset(ary, GET_SYM("pzxid"), LL2NUM(stat->pzxid));
  return ary;
}

#warning [wickman] TODO test zkrb_clone_acl_vector
struct ACL_vector * zkrb_clone_acl_vector(struct ACL_vector * src) {
  struct ACL_vector * dst = malloc(sizeof(struct ACL_vector));
  allocate_ACL_vector(dst, src->count);
  int k;
  for (k = 0; k < src->count; ++k) {
    struct ACL * elt = &src->data[k];
    dst->data[k].id.scheme = strdup(elt->id.scheme);
    dst->data[k].id.id = strdup(elt->id.id);
    dst->data[k].perms = elt->perms;
  }
  return dst;
}

#warning [wickman] TODO test zkrb_clone_string_vector
struct String_vector * zkrb_clone_string_vector(const struct String_vector * src) {
  struct String_vector * dst = malloc(sizeof(struct String_vector));
  allocate_String_vector(dst, src->count);
  int k;
  for (k = 0; k < src->count; ++k) {
    dst->data[k] = strdup(src->data[k]);
  }
  return dst;
}