hermann 0.11-x86-darwin-12 → 0.15-x86-darwin-12

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,1029 @@
1
+ /*
2
+ * hermann_lib.c - Ruby wrapper for the librdkafka library
3
+ *
4
+ * Copyright (c) 2014 Stan Campbell
5
+ * Copyright (c) 2014 Lookout, Inc.
6
+ * Copyright (c) 2014 R. Tyler Croy
7
+ *
8
+ * All rights reserved.
9
+ *
10
+ * Redistribution and use in source and binary forms, with or without
11
+ * modification, are permitted provided that the following conditions are met:
12
+ *
13
+ * 1. Redistributions of source code must retain the above copyright notice,
14
+ * this list of conditions and the following disclaimer.
15
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
16
+ * this list of conditions and the following disclaimer in the documentation
17
+ * and/or other materials provided with the distribution.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ * POSSIBILITY OF SUCH DAMAGE.
30
+ */
31
+
32
+ /* Much of the librdkafka library calls were lifted from rdkafka_example.c */
33
+
34
+ #include "hermann_lib.h"
35
+
36
+ /**
37
+ * Convenience function
38
+ *
39
+ * @param config HermannInstanceConfig
40
+ * @param outputStream FILE*
41
+ *
42
+ * Log the contents of the configuration to the provided stream.
43
+ */
44
+ void fprintf_hermann_instance_config(HermannInstanceConfig *config,
45
+ FILE *outputStream) {
46
+
47
+ const char *topic = NULL;
48
+ const char *brokers = NULL;
49
+ int isRkSet = -1;
50
+ int isRktSet = -1;
51
+ int partition = -1;
52
+ int isInitialized = -1;
53
+
54
+ if (NULL == config) {
55
+ fprintf(outputStream, "NULL configuration");
56
+ }
57
+ else {
58
+ isRkSet = (config->rk != NULL);
59
+ isRktSet = (config->rkt != NULL);
60
+
61
+ if (NULL == config->topic) {
62
+ topic = NULL;
63
+ }
64
+ else {
65
+ topic = config->topic;
66
+ }
67
+
68
+ if (NULL == config->brokers) {
69
+ brokers = "NULL";
70
+ }
71
+ else {
72
+ brokers = config->brokers;
73
+ }
74
+
75
+ partition = config->partition;
76
+ isInitialized = config->isInitialized;
77
+ }
78
+
79
+ fprintf(outputStream, "{ topic: %s, brokers: %s, partition: %d, isInitialized: %d, rkSet: %d, rkTSet: %d }\n",
80
+ topic, brokers, partition, isInitialized, isRkSet, isRktSet );
81
+ }
82
+
83
+ /**
84
+ * Message delivery report callback.
85
+ * Called once for each message.
86
+ *
87
+ */
88
/**
 * Message delivery report callback.
 * Called by librdkafka once for each produced message, on either successful
 * delivery to the broker or on delivery failure.
 *
 * @param rk rd_kafka_t* the producer handle (unused here)
 * @param message rd_kafka_message_t* the delivery report for one message
 * @param ctx void* global opaque (unused; the per-message context travels
 *            in message->_private, see below)
 */
static void msg_delivered(rd_kafka_t *rk,
						const rd_kafka_message_t *message,
						void *ctx) {
	hermann_push_ctx_t *push_ctx;
	VALUE is_error = Qfalse;
	/* Ruby method used to fulfill the associated Hermann::Result */
	ID hermann_result_fulfill_method = rb_intern("internal_set_value");

	TRACER("ctx: %p, err: %i\n", ctx, message->err);

	if (message->err) {
		is_error = Qtrue;
		fprintf(stderr, "%% Message delivery failed: %s\n",
				rd_kafka_err2str(message->err));
		/* todo: should raise an error? */
	}

	/* according to @edenhill rd_kafka_message_t._private is ABI safe to call
	 * and represents the `msg_opaque` argument passed into `rd_kafka_produce`
	 */
	if (NULL != message->_private) {
		push_ctx = (hermann_push_ctx_t *)message->_private;

		if (!message->err) {
			/* if we have not errored, great! let's say we're connected */
			push_ctx->producer->isConnected = 1;
		}

		/* call back into our Hermann::Result if it exists, discarding the
		 * return value
		 */
		if (NULL != push_ctx->result) {
			rb_funcall(push_ctx->result,
					hermann_result_fulfill_method,
					2,
					rb_str_new((char *)message->payload, message->len), /* value */
					is_error /* is_error */ );
		}
		/* The context was malloc()d by producer_push_single; this callback
		 * owns freeing it. */
		free(push_ctx);
	}
}
128
+
129
+ /**
130
+ * Producer partitioner callback.
131
+ * Used to determine the target partition within a topic for production.
132
+ *
133
+ * Returns an integer partition number or RD_KAFKA_PARTITION_UA if no
134
+ * available partition could be determined.
135
+ *
136
+ * @param rkt rd_kafka_topic_t* the topic
137
+ * @param keydata void* key information for calculating the partition
138
+ * @param keylen size_t key size
139
+ * @param partition_cnt int32_t the count of the number of partitions
140
+ * @param rkt_opaque void* opaque topic info
141
+ * @param msg_opaque void* opaque message info
142
+ */
143
+ static int32_t producer_partitioner_callback(const rd_kafka_topic_t *rkt,
144
+ const void *keydata,
145
+ size_t keylen,
146
+ int32_t partition_cnt,
147
+ void *rkt_opaque,
148
+ void *msg_opaque) {
149
+ /* Pick a random partition */
150
+ int retry = 0;
151
+ for (; retry < partition_cnt; retry++) {
152
+ int32_t partition = rand() % partition_cnt;
153
+ if (rd_kafka_topic_partition_available(rkt, partition)) {
154
+ break; /* this one will do */
155
+ }
156
+ }
157
+ }
158
+
159
+ /**
160
+ * hexdump
161
+ *
162
+ * Write the given payload to file in hex notation.
163
+ *
164
+ * @param fp FILE* the file into which to write
165
+ * @param name char* name
166
+ * @param ptr void* payload
167
+ * @param len size_t payload length
168
+ */
169
/**
 * hexdump
 *
 * Write the given payload to file in hex notation, 16 bytes per row with a
 * printable-character column alongside.
 *
 * @param fp FILE* the file into which to write
 * @param name char* label printed in the header line (skipped when NULL)
 * @param ptr void* payload
 * @param len size_t payload length
 */
static void hexdump(FILE *fp,
					const char *name,
					const void *ptr,
					size_t len) {
	const char *p = (const char *)ptr;
	/* size_t offsets: the previous unsigned int would truncate for
	 * payloads larger than UINT_MAX */
	size_t of = 0;

	if (name) {
		/* %zu is the conversion for size_t (%zd is for the signed ssize_t) */
		fprintf(fp, "%s hexdump (%zu bytes):\n", name, len);
	}

	for (of = 0; of < len; of += 16) {
		char hexen[16*3+1];
		char charen[16+1];
		int hof = 0;
		int cof = 0;
		size_t i;

		for (i = of; i < of + 16 && i < len; i++) {
			hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
			/* ctype functions require unsigned char (or EOF); a plain
			 * (possibly negative) char is undefined behavior */
			cof += sprintf(charen+cof, "%c",
						isprint((unsigned char)p[i]) ? p[i] : '.');
		}
		fprintf(fp, "%08zx: %-48s %-16s\n",
				of, hexen, charen);
	}
}
198
+
199
+ /**
200
+ * msg_consume
201
+ *
202
+ * Callback on message receipt.
203
+ *
204
+ * @param rkmessage rd_kafka_message_t* the message
205
+ * @param opaque void* opaque context
206
+ */
207
/**
 * msg_consume
 *
 * Callback on message receipt.
 *
 * @param rkmessage rd_kafka_message_t* the message
 * @param opaque void* opaque context (the consumer's HermannInstanceConfig)
 */
static void msg_consume(rd_kafka_message_t *rkmessage,
						void *opaque) {

	HermannInstanceConfig* cfg;

	cfg = (HermannInstanceConfig*)opaque;

	if (rkmessage->err) {
		/* Partition EOF is informational rather than a hard error */
		if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
			fprintf(stderr,
				"%% Consumer reached end of %s [%"PRId32"] "
				"message queue at offset %"PRId64"\n",
				rd_kafka_topic_name(rkmessage->rkt),
				rkmessage->partition, rkmessage->offset);

			/* Optionally stop the consume loop once we hit EOF */
			if (cfg->exit_eof) {
				cfg->run = 0;
			}

			return;
		}

		fprintf(stderr, "%% Consume error for topic \"%s\" [%"PRId32"] "
			"offset %"PRId64": %s\n",
			rd_kafka_topic_name(rkmessage->rkt),
			rkmessage->partition,
			rkmessage->offset,
			rd_kafka_message_errstr(rkmessage));
		return;
	}

	/* Debug dump of key and payload. NOTE(review): `DEBUG`, `output` and
	 * OUTPUT_HEXDUMP are defined outside this file (presumably in
	 * hermann_lib.h) — verify their semantics there. */
	if (DEBUG && rkmessage->key_len) {
		if (output == OUTPUT_HEXDUMP) {
			hexdump(stdout, "Message Key",
				rkmessage->key, rkmessage->key_len);
		}
		else {
			printf("Key: %.*s\n",
				(int)rkmessage->key_len, (char *)rkmessage->key);
		}
	}

	if (output == OUTPUT_HEXDUMP) {
		if (DEBUG) {
			hexdump(stdout, "Message Payload", rkmessage->payload, rkmessage->len);
		}
	}
	else {
		if (DEBUG) {
			printf("%.*s\n", (int)rkmessage->len, (char *)rkmessage->payload);
		}
	}

	// Yield the data to the Consumer's block
	if (rb_block_given_p()) {
		VALUE value = rb_str_new((char *)rkmessage->payload, rkmessage->len);
		rb_yield(value);
	}
	else {
		if (DEBUG) {
			fprintf(stderr, "No block given\n"); // todo: should this be an error?
		}
	}
}
271
+
272
+ /**
273
+ * logger
274
+ *
275
+ * Kafka logger callback (optional)
276
+ *
277
+ * todo: introduce better logging
278
+ *
279
+ * @param rk rd_kafka_t the producer or consumer
280
+ * @param level int the log level
281
+ * @param fac char* something of which I am unaware
282
+ * @param buf char* the log message
283
+ */
284
+ static void logger(const rd_kafka_t *rk,
285
+ int level,
286
+ const char *fac,
287
+ const char *buf) {
288
+ struct timeval tv;
289
+ gettimeofday(&tv, NULL);
290
+ fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
291
+ (int)tv.tv_sec, (int)(tv.tv_usec / 1000),
292
+ level, fac, rd_kafka_name(rk), buf);
293
+ }
294
+
295
+ /**
296
+ * consumer_init_kafka
297
+ *
298
+ * Initialize the Kafka context and instantiate a consumer.
299
+ *
300
+ * @param config HermannInstanceConfig* pointer to the instance configuration for this producer or consumer
301
+ */
302
/**
 * consumer_init_kafka
 *
 * Initialize the Kafka context and instantiate a consumer.
 *
 * Raises a Ruby RuntimeError (and therefore does not return) if the handle
 * cannot be created or no broker is valid.
 *
 * @param config HermannInstanceConfig* pointer to the instance configuration for this producer or consumer
 */
void consumer_init_kafka(HermannInstanceConfig* config) {

	TRACER("configuring rd_kafka\n");

	/* Quiet mode when stdin is not an interactive terminal */
	config->quiet = !isatty(STDIN_FILENO);

	/* Kafka configuration */
	config->conf = rd_kafka_conf_new();

	/* Topic configuration */
	config->topic_conf = rd_kafka_topic_conf_new();

	/* Create Kafka handle; rd_kafka_new takes ownership of config->conf */
	if (!(config->rk = rd_kafka_new(RD_KAFKA_CONSUMER, config->conf,
			config->errstr, sizeof(config->errstr)))) {
		fprintf(stderr, "%% Failed to create new consumer: %s\n", config->errstr);
		rb_raise(rb_eRuntimeError, "%% Failed to create new consumer: %s\n", config->errstr);
	}

	/* Set logger */
	rd_kafka_set_logger(config->rk, logger);
	rd_kafka_set_log_level(config->rk, LOG_DEBUG);

	/* TODO: offset calculation */
	config->start_offset = RD_KAFKA_OFFSET_END;

	/* Add brokers */
	if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
		fprintf(stderr, "%% No valid brokers specified\n");
		rb_raise(rb_eRuntimeError, "No valid brokers specified");
		return;
	}

	/* Create topic */
	config->rkt = rd_kafka_topic_new(config->rk, config->topic, config->topic_conf);

	/* We're now initialized */
	config->isInitialized = 1;
}
341
+
342
+ // Ruby gem extensions
343
+
344
+ #ifdef RB_THREAD_BLOCKING_REGION
345
+ /* NOTE: We only need this method defined if RB_THREAD_BLOCKING_REGION is
346
+ * defined, otherwise it's unused
347
+ */
348
+
349
+ /**
350
+ * Callback invoked if Ruby needs to stop our Consumer's IO loop for any reason
351
+ * (system exit, etc.)
352
+ */
353
+ static void consumer_consume_stop_callback(void *ptr) {
354
+ HermannInstanceConfig* config = (HermannInstanceConfig*)ptr;
355
+
356
+ TRACER("stopping callback (%p)\n", ptr);
357
+
358
+ config->run = 0;
359
+ }
360
+ #endif
361
+
362
+ /**
363
+ * Loop on a timeout to receive messages from Kafka. When the consumer_consume_stop_callback is invoked by Ruby,
364
+ * we'll break out of our loop and return.
365
+ */
366
+ void consumer_consume_loop(HermannInstanceConfig* consumerConfig) {
367
+
368
+ TRACER("\n");
369
+
370
+ while (consumerConfig->run) {
371
+ if (rd_kafka_consume_callback(consumerConfig->rkt, consumerConfig->partition,
372
+ 1000/*timeout*/,
373
+ msg_consume,
374
+ consumerConfig) < 0) {
375
+ fprintf(stderr, "%% Error: %s\n", rd_kafka_err2str( rd_kafka_errno2err(errno)));
376
+ }
377
+
378
+ }
379
+ }
380
+
381
+ /**
382
+ * Hermann::Consumer.consume
383
+ *
384
+ * Begin listening on the configured topic for messages. msg_consume will be called on each message received.
385
+ *
386
+ * @param VALUE self the Ruby object for this consumer
387
+ */
388
+ static VALUE consumer_consume(VALUE self) {
389
+
390
+ HermannInstanceConfig* consumerConfig;
391
+
392
+ TRACER("starting consume\n");
393
+
394
+ Data_Get_Struct(self, HermannInstanceConfig, consumerConfig);
395
+
396
+ if ((NULL == consumerConfig->topic) ||
397
+ (0 == strnlen(consumerConfig->topic, HERMANN_MAX_TOPIC_LEN))) {
398
+ fprintf(stderr, "Topic is null!\n");
399
+ rb_raise(rb_eRuntimeError, "Topic cannot be empty");
400
+ return self;
401
+ }
402
+
403
+ if (!consumerConfig->isInitialized) {
404
+ consumer_init_kafka(consumerConfig);
405
+ }
406
+
407
+ /* Start consuming */
408
+ if (rd_kafka_consume_start(consumerConfig->rkt, consumerConfig->partition, consumerConfig->start_offset) == -1) {
409
+ fprintf(stderr, "%% Failed to start consuming: %s\n",
410
+ rd_kafka_err2str(rd_kafka_errno2err(errno)));
411
+ rb_raise(rb_eRuntimeError,
412
+ rd_kafka_err2str(rd_kafka_errno2err(errno)));
413
+ return Qnil;
414
+ }
415
+
416
+ #ifdef RB_THREAD_BLOCKING_REGION
417
+ /** The consumer will listen for incoming messages in a loop, timing out and checking the consumerConfig->run
418
+ * flag every second.
419
+ *
420
+ * Call rb_thread_blocking_region to release the GVM lock and allow Ruby to amuse itself while we wait on
421
+ * IO from Kafka.
422
+ *
423
+ * If Ruby needs to interrupt the consumer loop, the stop callback will be invoked and the loop should exit.
424
+ */
425
+ rb_thread_blocking_region(consumer_consume_loop,
426
+ consumerConfig,
427
+ consumer_consume_stop_callback,
428
+ consumerConfig);
429
+ #else
430
+ consumer_consume_loop(consumerConfig);
431
+ #endif
432
+
433
+
434
+ /* Stop consuming */
435
+ rd_kafka_consume_stop(consumerConfig->rkt, consumerConfig->partition);
436
+
437
+ return Qnil;
438
+ }
439
+
440
+
441
+ static void producer_error_callback(rd_kafka_t *rk,
442
+ int error,
443
+ const char *reason,
444
+ void *opaque) {
445
+ hermann_conf_t *conf = (hermann_conf_t *)rd_kafka_opaque(rk);
446
+
447
+ TRACER("error (%i): %s\n", error, reason);
448
+
449
+ conf->isErrored = error;
450
+
451
+ if (error) {
452
+ /* If we have an old error string in here we need to make sure to
453
+ * free() it before we allocate a new string
454
+ */
455
+ if (NULL != conf->error) {
456
+ free(conf->error);
457
+ }
458
+
459
+ /* Grab the length of the string plus the null character */
460
+ size_t error_length = strnlen(reason, HERMANN_MAX_ERRSTR_LEN) + 1;
461
+ conf->error = (char *)malloc((sizeof(char) * error_length));
462
+ (void)strncpy(conf->error, reason, error_length);
463
+ }
464
+ }
465
+
466
+
467
+ /**
468
+ * producer_init_kafka
469
+ *
470
+ * Initialize the producer instance, setting up the Kafka topic and context.
471
+ *
472
+ * @param self VALUE Instance of the Producer Ruby object
473
+ * @param config HermannInstanceConfig* the instance configuration associated with this producer.
474
+ */
475
+ void producer_init_kafka(VALUE self, HermannInstanceConfig* config) {
476
+
477
+ TRACER("initing (%p)\n", config);
478
+
479
+ config->quiet = !isatty(STDIN_FILENO);
480
+
481
+ /* Kafka configuration */
482
+ config->conf = rd_kafka_conf_new();
483
+
484
+
485
+ /* Add our `self` to the opaque pointer for error and logging callbacks
486
+ */
487
+ rd_kafka_conf_set_opaque(config->conf, (void*)config);
488
+ rd_kafka_conf_set_error_cb(config->conf, producer_error_callback);
489
+
490
+ /* Topic configuration */
491
+ config->topic_conf = rd_kafka_topic_conf_new();
492
+
493
+ /* Set up a message delivery report callback.
494
+ * It will be called once for each message, either on successful
495
+ * delivery to broker, or upon failure to deliver to broker. */
496
+ rd_kafka_conf_set_dr_msg_cb(config->conf, msg_delivered);
497
+
498
+ /* Create Kafka handle */
499
+ if (!(config->rk = rd_kafka_new(RD_KAFKA_PRODUCER,
500
+ config->conf,
501
+ config->errstr,
502
+ sizeof(config->errstr)))) {
503
+ /* TODO: Use proper logger */
504
+ fprintf(stderr,
505
+ "%% Failed to create new producer: %s\n", config->errstr);
506
+ rb_raise(rb_eRuntimeError, "%% Failed to create new producer: %s\n", config->errstr);
507
+ }
508
+
509
+ /* Set logger */
510
+ rd_kafka_set_logger(config->rk, logger);
511
+ rd_kafka_set_log_level(config->rk, LOG_DEBUG);
512
+
513
+ if (rd_kafka_brokers_add(config->rk, config->brokers) == 0) {
514
+ /* TODO: Use proper logger */
515
+ fprintf(stderr, "%% No valid brokers specified\n");
516
+ rb_raise(rb_eRuntimeError, "No valid brokers specified");
517
+ return;
518
+ }
519
+
520
+ /* Create topic */
521
+ config->rkt = rd_kafka_topic_new(config->rk, config->topic, config->topic_conf);
522
+
523
+ /* Set the partitioner callback */
524
+ rd_kafka_topic_conf_set_partitioner_cb( config->topic_conf, producer_partitioner_callback);
525
+
526
+ /* We're now initialized */
527
+ config->isInitialized = 1;
528
+
529
+ TRACER("completed kafka init\n");
530
+ }
531
+
532
+ /**
533
+ * producer_push_single
534
+ *
535
+ * @param self VALUE the Ruby producer instance
536
+ * @param message VALUE the ruby String containing the outgoing message.
537
+ * @param result VALUE the Hermann::Result object to be fulfilled when the
538
+ * push completes
539
+ */
540
+ static VALUE producer_push_single(VALUE self, VALUE message, VALUE result) {
541
+
542
+ HermannInstanceConfig* producerConfig;
543
+ /* Context pointer, pointing to `result`, for the librdkafka delivery
544
+ * callback
545
+ */
546
+ hermann_push_ctx_t *delivery_ctx = (hermann_push_ctx_t *)malloc(sizeof(hermann_push_ctx_t));
547
+
548
+ TRACER("self: %p, message: %p, result: %p)\n", self, message, result);
549
+
550
+ Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
551
+
552
+ delivery_ctx->producer = producerConfig;
553
+ delivery_ctx->result = NULL;
554
+
555
+ TRACER("producerConfig: %p\n", producerConfig);
556
+
557
+ if ((NULL == producerConfig->topic) ||
558
+ (0 == strnlen(producerConfig->topic, HERMANN_MAX_TOPIC_LEN))) {
559
+ fprintf(stderr, "Topic is null!\n");
560
+ rb_raise(rb_eRuntimeError, "Topic cannot be empty");
561
+ return self;
562
+ }
563
+
564
+ if (!producerConfig->isInitialized) {
565
+ producer_init_kafka(self, producerConfig);
566
+ }
567
+
568
+ TRACER("kafka initialized\n");
569
+
570
+ /* Only pass result through if it's non-nil */
571
+ if (Qnil != result) {
572
+ delivery_ctx->result = result;
573
+ TRACER("setting result: %p\n", result);
574
+ }
575
+
576
+ /* Send/Produce message. */
577
+ if (-1 == rd_kafka_produce(producerConfig->rkt,
578
+ producerConfig->partition,
579
+ RD_KAFKA_MSG_F_COPY,
580
+ RSTRING_PTR(message),
581
+ RSTRING_LEN(message),
582
+ NULL,
583
+ 0,
584
+ delivery_ctx)) {
585
+ fprintf(stderr, "%% Failed to produce to topic %s partition %i: %s\n",
586
+ rd_kafka_topic_name(producerConfig->rkt), producerConfig->partition,
587
+ rd_kafka_err2str(rd_kafka_errno2err(errno)));
588
+ /* TODO: raise a Ruby exception here, requires a test though */
589
+ }
590
+
591
+ TRACER("returning\n");
592
+
593
+ return self;
594
+ }
595
+
596
+ /**
597
+ * producer_tick
598
+ *
599
+ * This function is responsible for ticking the librdkafka reactor so we can
600
+ * get feedback from the librdkafka threads back into the Ruby environment
601
+ *
602
+ * @param self VALUE the Ruby producer instance
603
+ * @param message VALUE A Ruby FixNum of how many ms we should wait on librdkafka
604
+ */
605
+ static VALUE producer_tick(VALUE self, VALUE timeout) {
606
+ hermann_conf_t *conf = NULL;
607
+ long timeout_ms = 0;
608
+ int events = 0;
609
+
610
+ if (Qnil != timeout) {
611
+ timeout_ms = rb_num2int(timeout);
612
+ }
613
+ else {
614
+ rb_raise(rb_eArgError, "Cannot call `tick` with a nil timeout!\n");
615
+ }
616
+
617
+ Data_Get_Struct(self, hermann_conf_t, conf);
618
+
619
+ /*
620
+ * if the producerConfig is not initialized then we never properly called
621
+ * producer_push_single, so why are we ticking?
622
+ */
623
+ if (!conf->isInitialized) {
624
+ rb_raise(rb_eRuntimeError, "Cannot call `tick` without having ever sent a message\n");
625
+ }
626
+
627
+ events = rd_kafka_poll(conf->rk, timeout_ms);
628
+
629
+ if (conf->isErrored) {
630
+ rb_raise(rb_eStandardError, conf->error);
631
+ }
632
+
633
+ return rb_int_new(events);
634
+ }
635
+
636
+
637
+ static VALUE producer_connect(VALUE self, VALUE timeout) {
638
+ HermannInstanceConfig *producerConfig;
639
+ rd_kafka_resp_err_t err;
640
+ VALUE result = Qfalse;
641
+ int timeout_ms = rb_num2int(timeout);
642
+ struct rd_kafka_metadata *data = NULL;
643
+
644
+ Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
645
+
646
+ if (!producerConfig->isInitialized) {
647
+ producer_init_kafka(self, producerConfig);
648
+ }
649
+
650
+ err = rd_kafka_metadata(producerConfig->rk,
651
+ 0,
652
+ producerConfig->rkt,
653
+ &data,
654
+ timeout_ms);
655
+ TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);
656
+
657
+ if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
658
+ TRACER("brokers: %i, topics: %i\n",
659
+ data->broker_cnt,
660
+ data->topic_cnt);
661
+ producerConfig->isConnected = 1;
662
+ result = Qtrue;
663
+ }
664
+ else {
665
+ producerConfig->isErrored = err;
666
+ }
667
+
668
+ rd_kafka_metadata_destroy(data);
669
+
670
+ return result;
671
+ }
672
+
673
+ static VALUE producer_is_connected(VALUE self) {
674
+ HermannInstanceConfig *producerConfig;
675
+
676
+ Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
677
+
678
+ if (!producerConfig->isInitialized) {
679
+ return Qfalse;
680
+ }
681
+
682
+ if (!producerConfig->isConnected) {
683
+ return Qfalse;
684
+ }
685
+
686
+ return Qtrue;
687
+ }
688
+
689
+ static VALUE producer_is_errored(VALUE self) {
690
+ HermannInstanceConfig *producerConfig;
691
+
692
+ Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
693
+
694
+ if (producerConfig->isErrored) {
695
+ return Qtrue;
696
+ }
697
+
698
+ return Qfalse;
699
+ }
700
+
701
+
702
+ /**
703
+ * consumer_free
704
+ *
705
+ * Callback called when Ruby needs to GC the configuration associated with an Hermann instance.
706
+ *
707
+ * @param p void* the instance of an HermannInstanceConfig to be freed from allocated memory.
708
+ */
709
+ static void consumer_free(void *p) {
710
+
711
+ HermannInstanceConfig* config = (HermannInstanceConfig *)p;
712
+
713
+ #ifdef TRACE
714
+ fprintf(stderr, "consumer_free\n");
715
+ #endif
716
+
717
+ // the p *should* contain a pointer to the consumerConfig which also must be freed
718
+ if (config->rkt != NULL) {
719
+ rd_kafka_topic_destroy(config->rkt);
720
+ }
721
+
722
+ if (config->rk != NULL) {
723
+ rd_kafka_destroy(config->rk);
724
+ }
725
+
726
+ // clean up the struct
727
+ free(config);
728
+ }
729
+
730
+ /**
731
+ * consumer_allocate
732
+ *
733
+ * Allocate and wrap an HermannInstanceConfig for this Consumer object.
734
+ *
735
+ * @param klass VALUE the class of the enclosing Ruby object.
736
+ */
737
+ static VALUE consumer_allocate(VALUE klass) {
738
+
739
+ VALUE obj;
740
+ HermannInstanceConfig* consumerConfig;
741
+
742
+ #ifdef TRACE
743
+ fprintf(stderr, "consumer_free\n");
744
+ #endif
745
+
746
+ consumerConfig = ALLOC(HermannInstanceConfig);
747
+
748
+ // Make sure it's initialized
749
+ consumerConfig->topic = NULL;
750
+ consumerConfig->rk = NULL;
751
+ consumerConfig->rkt = NULL;
752
+ consumerConfig->brokers = NULL;
753
+ consumerConfig->partition = -1;
754
+ consumerConfig->topic_conf = NULL;
755
+ consumerConfig->errstr[0] = 0;
756
+ consumerConfig->conf = NULL;
757
+ consumerConfig->debug = NULL;
758
+ consumerConfig->start_offset = -1;
759
+ consumerConfig->do_conf_dump = -1;
760
+ consumerConfig->run = 0;
761
+ consumerConfig->exit_eof = 0;
762
+ consumerConfig->quiet = 0;
763
+ consumerConfig->isInitialized = 0;
764
+
765
+ obj = Data_Wrap_Struct(klass, 0, consumer_free, consumerConfig);
766
+
767
+ return obj;
768
+ }
769
+
770
+ /**
771
+ * consumer_initialize
772
+ *
773
+ * todo: configure the brokers through passed parameter, later through zk
774
+ *
775
+ * Set up the Consumer's HermannInstanceConfig context.
776
+ *
777
+ * @param self VALUE the Ruby instance of the Consumer
778
+ * @param topic VALUE a Ruby string
779
+ * @param brokers VALUE a Ruby string containing list of host:port
780
+ * @param partition VALUE a Ruby number
781
+ */
782
/**
 * consumer_initialize
 *
 * todo: configure the brokers through passed parameter, later through zk
 *
 * Set up the Consumer's HermannInstanceConfig context.
 *
 * @param self VALUE the Ruby instance of the Consumer
 * @param topic VALUE a Ruby string
 * @param brokers VALUE a Ruby string containing list of host:port
 * @param partition VALUE a Ruby number
 */
static VALUE consumer_initialize(VALUE self,
								VALUE topic,
								VALUE brokers,
								VALUE partition) {

	HermannInstanceConfig* consumerConfig;
	char* topicPtr;
	char* brokersPtr;
	int partitionNo;

	TRACER("initing consumer ruby object\n");

	/* NOTE(review): these point into the Ruby strings' internal buffers —
	 * no copy is taken, so this assumes the topic/brokers VALUEs outlive
	 * the consumer. Verify against GC behavior of the callers. */
	topicPtr = StringValuePtr(topic);
	brokersPtr = StringValuePtr(brokers);
	partitionNo = FIX2INT(partition);
	Data_Get_Struct(self, HermannInstanceConfig, consumerConfig);

	consumerConfig->topic = topicPtr;
	consumerConfig->brokers = brokersPtr;
	consumerConfig->partition = partitionNo;
	consumerConfig->run = 1;      /* consume loop runs until stopped */
	consumerConfig->exit_eof = 0; /* keep polling past partition EOF */
	consumerConfig->quiet = 0;

	return self;
}
808
+
809
+ /**
810
+ * consumer_init_copy
811
+ *
812
+ * When copying into a new instance of a Consumer, reproduce the configuration info.
813
+ *
814
+ * @param copy VALUE the Ruby Consumer instance (with configuration) as destination
815
+ * @param orig VALUE the Ruby Consumer instance (with configuration) as source
816
+ *
817
+ */
818
+ static VALUE consumer_init_copy(VALUE copy,
819
+ VALUE orig) {
820
+ HermannInstanceConfig* orig_config;
821
+ HermannInstanceConfig* copy_config;
822
+
823
+ if (copy == orig) {
824
+ return copy;
825
+ }
826
+
827
+ if (TYPE(orig) != T_DATA || RDATA(orig)->dfree != (RUBY_DATA_FUNC)consumer_free) {
828
+ rb_raise(rb_eTypeError, "wrong argument type");
829
+ }
830
+
831
+ Data_Get_Struct(orig, HermannInstanceConfig, orig_config);
832
+ Data_Get_Struct(copy, HermannInstanceConfig, copy_config);
833
+
834
+ // Copy over the data from one struct to the other
835
+ MEMCPY(copy_config, orig_config, HermannInstanceConfig, 1);
836
+
837
+ return copy;
838
+ }
839
+
840
+ /**
841
+ * producer_free
842
+ *
843
+ * Reclaim memory allocated to the Producer's configuration
844
+ *
845
+ * @param p void* the instance's configuration struct
846
+ */
847
+ static void producer_free(void *p) {
848
+
849
+ HermannInstanceConfig* config = (HermannInstanceConfig *)p;
850
+
851
+ TRACER("dealloc producer ruby object (%p)\n", p);
852
+
853
+
854
+ if (NULL == p) {
855
+ return;
856
+ }
857
+
858
+ // Clean up the topic
859
+ if (NULL != config->rkt) {
860
+ rd_kafka_topic_destroy(config->rkt);
861
+ }
862
+
863
+ // Take care of the producer instance
864
+ if (NULL != config->rk) {
865
+ rd_kafka_destroy(config->rk);
866
+ }
867
+
868
+ // Free the struct
869
+ free(config);
870
+ }
871
+
872
+ /**
873
+ * producer_allocate
874
+ *
875
+ * Allocate the memory for a Producer's configuration
876
+ *
877
+ * @param klass VALUE the class of the Producer
878
+ */
879
+ static VALUE producer_allocate(VALUE klass) {
880
+
881
+ VALUE obj;
882
+ HermannInstanceConfig* producerConfig = ALLOC(HermannInstanceConfig);
883
+
884
+ producerConfig->topic = NULL;
885
+ producerConfig->rk = NULL;
886
+ producerConfig->rkt = NULL;
887
+ producerConfig->brokers = NULL;
888
+ producerConfig->partition = -1;
889
+ producerConfig->topic_conf = NULL;
890
+ producerConfig->errstr[0] = 0;
891
+ producerConfig->conf = NULL;
892
+ producerConfig->debug = NULL;
893
+ producerConfig->start_offset = -1;
894
+ producerConfig->do_conf_dump = -1;
895
+ producerConfig->run = 0;
896
+ producerConfig->exit_eof = 0;
897
+ producerConfig->quiet = 0;
898
+ producerConfig->isInitialized = 0;
899
+ producerConfig->isConnected = 0;
900
+ producerConfig->isErrored = 0;
901
+ producerConfig->error = NULL;
902
+
903
+ obj = Data_Wrap_Struct(klass, 0, producer_free, producerConfig);
904
+
905
+ return obj;
906
+ }
907
+
908
+ /**
909
+ * producer_initialize
910
+ *
911
+ * Set up the configuration context for the Producer instance
912
+ *
913
+ * @param self VALUE the Producer instance
914
+ * @param topic VALUE the Ruby string naming the topic
915
+ * @param brokers VALUE a Ruby string containing host:port pairs separated by commas
916
+ */
917
/**
 * producer_initialize
 *
 * Set up the configuration context for the Producer instance
 *
 * @param self VALUE the Producer instance
 * @param topic VALUE the Ruby string naming the topic
 * @param brokers VALUE a Ruby string containing host:port pairs separated by commas
 */
static VALUE producer_initialize(VALUE self,
								VALUE topic,
								VALUE brokers) {

	HermannInstanceConfig* producerConfig;
	char* topicPtr;
	char* brokersPtr;

	TRACER("initialize Producer ruby object\n");

	/* NOTE(review): these point into the Ruby strings' internal buffers —
	 * no copy is taken, so this assumes the topic/brokers VALUEs outlive
	 * the producer. Verify against GC behavior of the callers. */
	topicPtr = StringValuePtr(topic);
	brokersPtr = StringValuePtr(brokers);
	Data_Get_Struct(self, HermannInstanceConfig, producerConfig);

	producerConfig->topic = topicPtr;
	producerConfig->brokers = brokersPtr;
	/** Using RD_KAFKA_PARTITION_UA specifies we want the partitioner callback to be called to determine the target
	 * partition
	 */
	producerConfig->partition = RD_KAFKA_PARTITION_UA;
	producerConfig->run = 1;
	producerConfig->exit_eof = 0;
	producerConfig->quiet = 0;

	return self;
}
944
+
945
+ /**
946
+ * producer_init_copy
947
+ *
948
+ * Copy the configuration information from orig into copy for the given Producer instances.
949
+ *
950
+ * @param copy VALUE destination Producer
951
+ * @param orig VALUE source Producer
952
+ */
953
+ static VALUE producer_init_copy(VALUE copy,
954
+ VALUE orig) {
955
+ HermannInstanceConfig* orig_config;
956
+ HermannInstanceConfig* copy_config;
957
+
958
+ if (copy == orig) {
959
+ return copy;
960
+ }
961
+
962
+ if (TYPE(orig) != T_DATA || RDATA(orig)->dfree != (RUBY_DATA_FUNC)producer_free) {
963
+ rb_raise(rb_eTypeError, "wrong argument type");
964
+ }
965
+
966
+ Data_Get_Struct(orig, HermannInstanceConfig, orig_config);
967
+ Data_Get_Struct(copy, HermannInstanceConfig, copy_config);
968
+
969
+ // Copy over the data from one struct to the other
970
+ MEMCPY(copy_config, orig_config, HermannInstanceConfig, 1);
971
+
972
+ return copy;
973
+ }
974
+
975
+ /**
976
+ * Init_hermann_lib
977
+ *
978
+ * Called by Ruby when the Hermann gem is loaded.
979
+ * Defines the Hermann module.
980
+ * Defines the Producer and Consumer classes.
981
+ */
982
+ void Init_hermann_lib() {
983
+ VALUE lib_module, c_consumer, c_producer;
984
+
985
+ TRACER("setting up Hermann::Lib\n");
986
+
987
+ /* Define the module */
988
+ hermann_module = rb_define_module("Hermann");
989
+ lib_module = rb_define_module_under(hermann_module, "Lib");
990
+
991
+
992
+ /* ---- Define the consumer class ---- */
993
+ c_consumer = rb_define_class_under(lib_module, "Consumer", rb_cObject);
994
+
995
+ /* Allocate */
996
+ rb_define_alloc_func(c_consumer, consumer_allocate);
997
+
998
+ /* Initialize */
999
+ rb_define_method(c_consumer, "initialize", consumer_initialize, 3);
1000
+ rb_define_method(c_consumer, "initialize_copy", consumer_init_copy, 1);
1001
+
1002
+ /* Consumer has method 'consume' */
1003
+ rb_define_method( c_consumer, "consume", consumer_consume, 0 );
1004
+
1005
+ /* ---- Define the producer class ---- */
1006
+ c_producer = rb_define_class_under(lib_module, "Producer", rb_cObject);
1007
+
1008
+ /* Allocate */
1009
+ rb_define_alloc_func(c_producer, producer_allocate);
1010
+
1011
+ /* Initialize */
1012
+ rb_define_method(c_producer, "initialize", producer_initialize, 2);
1013
+ rb_define_method(c_producer, "initialize_copy", producer_init_copy, 1);
1014
+
1015
+ /* Producer.push_single(msg) */
1016
+ rb_define_method(c_producer, "push_single", producer_push_single, 2);
1017
+
1018
+ /* Producer.tick */
1019
+ rb_define_method(c_producer, "tick", producer_tick, 1);
1020
+
1021
+ /* Producer.connected? */
1022
+ rb_define_method(c_producer, "connected?", producer_is_connected, 0);
1023
+
1024
+ /* Producer.errored? */
1025
+ rb_define_method(c_producer, "errored?", producer_is_errored, 0);
1026
+
1027
+ /* Producer.connect */
1028
+ rb_define_method(c_producer, "connect", producer_connect, 1);
1029
+ }