hermann 0.24.0.0-java → 0.24.1.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 19434f0247b122bbee20a57a059241bfa881fbd3
-   data.tar.gz: 400d47b34a071c26220a5ec9000ff53bd63d9c42
+   metadata.gz: 36258654c7dab0d22f5605f7570e9af0bc1d413f
+   data.tar.gz: 6a76a7fe69d25b7ca81b729ead0bb443fbb89e64
  SHA512:
-   metadata.gz: cffa05d54e8d6280ce1ca2bf92fad98823bfd6550568e32741b83e8704e4001f8bc616be89e6e9021c83c46d1b231ed58be7cb4c7b42a80773efab53f2549455
-   data.tar.gz: 313d612acc4e30991f305dc69c079293c386e88e060aeacce93e6e023d34f7279ee3cab76215ba71e184a0924f080dfd53fe51a500a28293bfa24eda2aebb77e
+   metadata.gz: 7e12d640758271fbc235823e3cb1990c63fc6140160e79ca3b40204ffc3a696a57b92bd4ccc09150b692532ea427147d4ee84c41ba8d5a0939657ba82bb51061
+   data.tar.gz: c9dd57c7ffac7ed14b29b0c76bb2dc048226c381608c772bdef86c653e18ff12b0563268c789929d9964acd339ae66d03afe041fd6ce71869d209487cf63049e
ext/hermann/extconf.rb CHANGED
@@ -147,6 +147,7 @@ $LOCAL_LIBS << File.join(librdkafka.path, 'lib', 'librdkafka.a')
 
  have_header('ruby/thread.h')
  have_header('ruby/intern.h')
+ have_header('ruby/version.h')
  have_func('rb_thread_blocking_region')
  have_func('rb_thread_call_without_gvl')
 
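For context: mkmf's have_header and have_func probes define HAVE_RUBY_VERSION_H, HAVE_RB_THREAD_BLOCKING_REGION, and HAVE_RB_THREAD_CALL_WITHOUT_GVL for the C preprocessor when they succeed, which is what the new #ifdef guards in hermann_lib.c below key off. A minimal standalone mkmf sketch of the pattern (not part of this diff):

# sketch_extconf.rb -- illustrates the feature checks used above
require 'mkmf'

have_header('ruby/version.h')            # defines HAVE_RUBY_VERSION_H when found
have_func('rb_thread_call_without_gvl')  # defines HAVE_RB_THREAD_CALL_WITHOUT_GVL (Ruby >= 2.0)
have_func('rb_thread_blocking_region')   # defines HAVE_RB_THREAD_BLOCKING_REGION (Ruby 1.9.x)

create_makefile('sketch')                # emits a Makefile carrying the -DHAVE_... flags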
ext/hermann/hermann_lib.c CHANGED
@@ -33,6 +33,9 @@
 
  #include "hermann_lib.h"
 
+ #ifdef HAVE_RUBY_VERSION_H
+ #include <ruby/version.h>
+ #endif
 
  /* how long to let librdkafka block on the socket before returning back to the interpreter.
   * essentially defines how long we wait before consumer_consume_stop_callback() can fire */
@@ -120,7 +123,7 @@ static void msg_delivered(rd_kafka_t *rk,
      /* call back into our Hermann::Result if it exists, discarding the
       * return value
       */
-     if (NULL != push_ctx->result) {
+     if (NULL != (void *)push_ctx->result) {
          rb_funcall(push_ctx->result,
                     hermann_result_fulfill_method,
                     2,
@@ -153,12 +156,15 @@ static int32_t producer_partitioner_callback(const rd_kafka_topic_t *rkt,
                                        void *msg_opaque) {
      /* Pick a random partition */
      int retry = 0;
+     int32_t partition = RD_KAFKA_PARTITION_UA;
+
      for (; retry < partition_cnt; retry++) {
-         int32_t partition = rand() % partition_cnt;
+         partition = rand() % partition_cnt;
          if (rd_kafka_topic_partition_available(rkt, partition)) {
              break; /* this one will do */
          }
      }
+     return partition;
  }
 
  /**
@@ -259,6 +265,7 @@ static void msg_consume(rd_kafka_message_t *rkmessage, HermannInstanceConfig *cf
      // Yield the data to the Consumer's block
      if (rb_block_given_p()) {
          VALUE value = rb_str_new((char *)rkmessage->payload, rkmessage->len);
+         rd_kafka_message_destroy(rkmessage);
          rb_yield(value);
      }
      else {
@@ -388,15 +395,19 @@ static void *consumer_recv_msg(void *ptr)
   * after every message, to see if the ruby interpreter wants us to exit the
   * loop.
   *
-  * @param HermannInstanceConfig* The hermann configuration for this consumer
+  * @param self The consumer instance
   */
 
- static void consumer_consume_loop(HermannInstanceConfig* consumerConfig) {
+ static VALUE consumer_consume_loop(VALUE self) {
+     HermannInstanceConfig* consumerConfig;
      rd_kafka_message_t *msg;
+
+     Data_Get_Struct(self, HermannInstanceConfig, consumerConfig);
+
      TRACER("\n");
 
      while (consumerConfig->run) {
-         #ifdef HAVE_RB_THREAD_BLOCKING_REGION
+         #if HAVE_RB_THREAD_BLOCKING_REGION && RUBY_API_VERSION_MAJOR < 2
          msg = (rd_kafka_message_t *) rb_thread_blocking_region((rb_blocking_function_t *) consumer_recv_msg,
                                                                 consumerConfig,
                                                                 consumer_consume_stop_callback,
@@ -412,9 +423,24 @@ static void consumer_consume_loop(HermannInstanceConfig* consumerConfig) {
 
          if ( msg ) {
              msg_consume(msg, consumerConfig);
-             rd_kafka_message_destroy(msg);
          }
      }
+
+     return Qnil;
+ }
+
+ /**
+  * consumer_consume_loop_stop
+  *
+  * called when we're done with the .consume() loop. lets rdkafka clean up some internal structures
+  */
+ static VALUE consumer_consume_loop_stop(VALUE self) {
+     HermannInstanceConfig* consumerConfig;
+     Data_Get_Struct(self, HermannInstanceConfig, consumerConfig);
+
+     rd_kafka_consume_stop(consumerConfig->rkt, consumerConfig->partition);
+     return Qnil;
  }
 
  /**
@@ -446,17 +472,12 @@ static VALUE consumer_consume(VALUE self, VALUE topic) {
      if (rd_kafka_consume_start(consumerConfig->rkt, consumerConfig->partition, consumerConfig->start_offset) == -1) {
          fprintf(stderr, "%% Failed to start consuming: %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
-         rb_raise(rb_eRuntimeError,
+         rb_raise(rb_eRuntimeError, "%s",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
          return Qnil;
      }
 
-     consumer_consume_loop(consumerConfig);
-
-     /* Stop consuming */
-     rd_kafka_consume_stop(consumerConfig->rkt, consumerConfig->partition);
-
-     return Qnil;
+     return rb_ensure(consumer_consume_loop, self, consumer_consume_loop_stop, self);
  }
 
 
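The rb_ensure change above is the point of this hunk: rd_kafka_consume_stop now runs even when the consume block raises (or the thread is killed), instead of only on a clean exit from the loop. A minimal MRI sketch, assuming a broker at localhost:9092 and a 'testtopic' topic (the process call is a hypothetical handler):

require 'hermann'
require 'hermann/consumer'

consumer = Hermann::Consumer.new('testtopic', brokers: 'localhost:9092', partition: 0)

begin
  consumer.consume do |message|
    process(message)   # hypothetical; suppose it raises
  end
rescue => e
  # consumer_consume_loop_stop has already called rd_kafka_consume_stop,
  # so the librdkafka consumer shut down cleanly before the error surfaced.
  warn "consumer stopped: #{e}"
end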
@@ -575,7 +596,7 @@ static VALUE producer_push_single(VALUE self, VALUE message, VALUE topic, VALUE
      Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
 
      delivery_ctx->producer = producerConfig;
-     delivery_ctx->result = NULL;
+     delivery_ctx->result = (VALUE) NULL;
 
      TRACER("producerConfig: %p\n", producerConfig);
 
@@ -666,19 +687,56 @@ static VALUE producer_tick(VALUE self, VALUE timeout) {
      events = rd_kafka_poll(conf->rk, timeout_ms);
 
      if (conf->isErrored) {
-         rb_raise(rb_eStandardError, conf->error);
+         rb_raise(rb_eStandardError, "%s", conf->error);
      }
 
      return rb_int_new(events);
  }
 
+ /*
+  * producer_metadata_request_nogvl
+  *
+  * call rd_kafka_metadata without the GVL held. Note that rd_kafka_metadata is not interruptible,
+  * so in case of interrupt the thread will not respond until timeout_ms is reached.
+  *
+  * rd_kafka_metadata will fill in the ctx->data pointer on success
+  *
+  * @param ptr void* the hermann_metadata_ctx_t
+  */
+
+ static void *producer_metadata_request_nogvl(void *ptr)
+ {
+     hermann_metadata_ctx_t *ctx = (hermann_metadata_ctx_t*)ptr;
+
+     return (void *) rd_kafka_metadata(ctx->rk,
+                                       ctx->topic ? 0 : 1,
+                                       ctx->topic,
+                                       (const struct rd_kafka_metadata **) &(ctx->data),
+                                       ctx->timeout_ms);
+ }
+
+ static int producer_metadata_request(hermann_metadata_ctx_t *ctx)
+ {
+     int err;
+
+ #if HAVE_RB_THREAD_BLOCKING_REGION && RUBY_API_VERSION_MAJOR < 2
+     err = (int) rb_thread_blocking_region((rb_blocking_function_t *) producer_metadata_request_nogvl, ctx,
+                                           NULL, NULL);
+ #elif HAVE_RB_THREAD_CALL_WITHOUT_GVL
+     err = (int) rb_thread_call_without_gvl(producer_metadata_request_nogvl, ctx, NULL, NULL);
+ #else
+     err = (int) producer_metadata_request_nogvl(ctx);
+ #endif
+
+     return err;
+ }
 
  static VALUE producer_connect(VALUE self, VALUE timeout) {
      HermannInstanceConfig *producerConfig;
      rd_kafka_resp_err_t err;
      VALUE result = Qfalse;
-     int timeout_ms = rb_num2int(timeout);
-     struct rd_kafka_metadata *data = NULL;
+     hermann_metadata_ctx_t md_context;
 
      Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
 
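The effect of dropping the GVL here is visible from Ruby: other threads keep running while rd_kafka_metadata blocks. A sketch, assuming a reachable broker at localhost:9092; as the comment above notes, the request itself is not interruptible, so an interrupt only takes effect once timeout_ms expires:

require 'hermann_lib'

producer = Hermann::Lib::Producer.new('localhost:9092')

# Keeps printing while the metadata request below blocks, because the
# extension releases the GVL around rd_kafka_metadata.
ticker = Thread.new { 10.times { print '.'; sleep 0.1 } }

producer.metadata(nil, 1_000)   # nil => fetch metadata for all topics
ticker.join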
@@ -686,17 +744,19 @@ static VALUE producer_connect(VALUE self, VALUE timeout) {
          producer_init_kafka(self, producerConfig);
      }
 
-     err = rd_kafka_metadata(producerConfig->rk,
-                             0,
-                             producerConfig->rkt,
-                             &data,
-                             timeout_ms);
+     md_context.rk = producerConfig->rk;
+     md_context.topic = NULL;
+     md_context.data = NULL;
+     md_context.timeout_ms = rb_num2int(timeout);
+
+     err = producer_metadata_request(&md_context);
+
      TRACER("err: %s (%i)\n", rd_kafka_err2str(err), err);
 
      if (RD_KAFKA_RESP_ERR_NO_ERROR == err) {
          TRACER("brokers: %i, topics: %i\n",
-                data->broker_cnt,
-                data->topic_cnt);
+                md_context.data->broker_cnt,
+                md_context.data->topic_cnt);
          producerConfig->isConnected = 1;
          result = Qtrue;
      }
@@ -704,11 +764,118 @@ static VALUE producer_connect(VALUE self, VALUE timeout) {
          producerConfig->isErrored = err;
      }
 
-     rd_kafka_metadata_destroy(data);
+     if ( md_context.data )
+         rd_kafka_metadata_destroy(md_context.data);
 
      return result;
  }
 
+ /*
+  * producer_metadata_make_hash
+  *
+  * transform the rd_kafka_metadata structure into a ruby hash. eg:
+  * { :brokers => [ {:id=>0, :host=>"172.20.10.3", :port=>9092} ],
+  *   :topics => { "maxwell" => [ {:id=>0, :leader_id=>0, :replica_ids=>[0], :isr_ids=>[0]} ] } }
+  *
+  * @param data struct rd_kafka_metadata* data returned from rd_kafka_metadata
+  */
+
+ static VALUE producer_metadata_make_hash(struct rd_kafka_metadata *data)
+ {
+     int i, j, k;
+     VALUE broker_hash, topic_hash, partition_ary, partition_hash, partition_replica_ary, partition_isr_ary;
+     VALUE hash = rb_hash_new();
+     VALUE brokers = rb_ary_new2(data->broker_cnt);
+     VALUE topics = rb_hash_new();
+
+     for ( i = 0; i < data->broker_cnt; i++ ) {
+         broker_hash = rb_hash_new();
+         rb_hash_aset(broker_hash, ID2SYM(rb_intern("id")), INT2FIX(data->brokers[i].id));
+         rb_hash_aset(broker_hash, ID2SYM(rb_intern("host")), rb_str_new2(data->brokers[i].host));
+         rb_hash_aset(broker_hash, ID2SYM(rb_intern("port")), INT2FIX(data->brokers[i].port));
+         rb_ary_push(brokers, broker_hash);
+     }
+
+     for ( i = 0; i < data->topic_cnt; i++ ) {
+         partition_ary = rb_ary_new2(data->topics[i].partition_cnt);
+
+         for ( j = 0 ; j < data->topics[i].partition_cnt ; j++ ) {
+             VALUE partition_hash = rb_hash_new();
+             rd_kafka_metadata_partition_t *partition = &(data->topics[i].partitions[j]);
+
+             /* id => 1, leader_id => 0 */
+             rb_hash_aset(partition_hash, ID2SYM(rb_intern("id")), INT2FIX(partition->id));
+             rb_hash_aset(partition_hash, ID2SYM(rb_intern("leader_id")), INT2FIX(partition->leader));
+
+             /* replica_ids => [1, 0] */
+             partition_replica_ary = rb_ary_new2(partition->replica_cnt);
+             for ( k = 0 ; k < partition->replica_cnt ; k++ ) {
+                 rb_ary_push(partition_replica_ary, INT2FIX(partition->replicas[k]));
+             }
+             rb_hash_aset(partition_hash, ID2SYM(rb_intern("replica_ids")), partition_replica_ary);
+
+             /* isr_ids => [1, 0] */
+             partition_isr_ary = rb_ary_new2(partition->isr_cnt);
+             for ( k = 0 ; k < partition->isr_cnt ; k++ ) {
+                 rb_ary_push(partition_isr_ary, INT2FIX(partition->isrs[k]));
+             }
+             rb_hash_aset(partition_hash, ID2SYM(rb_intern("isr_ids")), partition_isr_ary);
+
+             rb_ary_push(partition_ary, partition_hash);
+         }
+
+         rb_hash_aset(topics, rb_str_new2(data->topics[i].topic), partition_ary);
+     }
+
+     rb_hash_aset(hash, ID2SYM(rb_intern("brokers")), brokers);
+     rb_hash_aset(hash, ID2SYM(rb_intern("topics")), topics);
+     return hash;
+ }
+
+ /*
+  * producer_metadata
+  *
+  * make a metadata request to the kafka server, returning a hash
+  * containing a list of brokers and topics.
+  *
+  * @param topicStr VALUE topic name to filter on, or Qnil for metadata on all topics
+  * @param timeout VALUE request timeout in milliseconds
+  */
+
+ static VALUE producer_metadata(VALUE self, VALUE topicStr, VALUE timeout) {
+     HermannInstanceConfig *producerConfig;
+     rd_kafka_resp_err_t err;
+     hermann_metadata_ctx_t md_context;
+     VALUE result;
+
+     Data_Get_Struct(self, HermannInstanceConfig, producerConfig);
+
+     if (!producerConfig->isInitialized) {
+         producer_init_kafka(self, producerConfig);
+     }
+
+     md_context.rk = producerConfig->rk;
+     md_context.timeout_ms = rb_num2int(timeout);
+
+     if ( !NIL_P(topicStr) ) {
+         Check_Type(topicStr, T_STRING);
+         md_context.topic = rd_kafka_topic_new(producerConfig->rk, StringValuePtr(topicStr), NULL);
+     } else {
+         md_context.topic = NULL;
+     }
+
+     err = producer_metadata_request(&md_context);
+
+     if ( err != RD_KAFKA_RESP_ERR_NO_ERROR ) {
+         // annoyingly, this is always a timeout error -- the rest rdkafka just jams onto STDERR
+         rb_raise( rb_eRuntimeError, "%s", rd_kafka_err2str(err) );
+     } else {
+         result = producer_metadata_make_hash(md_context.data);
+         rd_kafka_metadata_destroy(md_context.data);
+         return result;
+     }
+ }
+
  static VALUE producer_is_connected(VALUE self) {
      HermannInstanceConfig *producerConfig;
 
@@ -1076,4 +1243,7 @@ void Init_hermann_lib() {
 
      /* Producer.connect */
      rb_define_method(c_producer, "connect", producer_connect, 1);
+
+     /* Producer.metadata */
+     rb_define_method(c_producer, "metadata", producer_metadata, 2);
  }
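With the binding registered, Hermann::Lib::Producer#metadata takes a topic name (or nil for all topics) and a timeout in milliseconds, and returns the hash assembled by producer_metadata_make_hash above. A reading sketch, assuming a reachable broker:

require 'hermann_lib'

producer = Hermann::Lib::Producer.new('localhost:9092')
md = producer.metadata('testtopic', 2_000)

md[:brokers].each do |b|
  puts "broker #{b[:id]} at #{b[:host]}:#{b[:port]}"
end

md[:topics].each do |name, partitions|
  partitions.each do |p|
    puts "#{name}[#{p[:id]}] leader=#{p[:leader_id]} replicas=#{p[:replica_ids].inspect} isr=#{p[:isr_ids].inspect}"
  end
end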
ext/hermann/hermann_lib.h CHANGED
@@ -113,4 +113,11 @@ typedef struct {
      VALUE result;
  } hermann_push_ctx_t;
 
+ typedef struct {
+     rd_kafka_t *rk;
+     rd_kafka_topic_t *topic;
+     struct rd_kafka_metadata *data;
+     int timeout_ms;
+ } hermann_metadata_ctx_t;
+
  #endif
lib/hermann/discovery/metadata.rb ADDED
@@ -0,0 +1,77 @@
+ require 'hermann_lib'
+ require 'hermann/consumer'
+
+ module Hermann
+   module Discovery
+     class Metadata
+       Broker = Struct.new(:id, :host, :port) do
+         def to_s
+           "#{host}:#{port}"
+         end
+       end
+       Topic = Struct.new(:name, :partitions)
+
+       Partition = Struct.new(:id, :leader, :replicas, :insync_replicas, :topic_name) do
+         def consumer(offset=:end)
+           Hermann::Consumer.new(topic_name, brokers: ([leader] + replicas).join(','), partition: id, offset: offset)
+         end
+       end
+
+       DEFAULT_TIMEOUT_MS = 2_000
+
+       def initialize(brokers, options = {})
+         raise "this is an MRI api only!" if Hermann.jruby?
+         @internal = Hermann::Lib::Producer.new(brokers)
+         @timeout = options[:timeout] || DEFAULT_TIMEOUT_MS
+       end
+
+       #
+       # @internal.metadata returns:
+       # {:brokers => [{:id=>3, :host=>"kafka3.alpha4.sac1.zdsys.com", :port=>9092}],
+       #  :topics => {"testtopic"=>[{:id=>0, :leader_id=>3, :replica_ids=>[3, 1], :isr_ids=>[3, 1]}]}}
+       #
+       def brokers
+         brokers_from_metadata(@internal.metadata(nil, @timeout))
+       end
+
+       def topic(t)
+         get_topics(t)[t]
+       end
+
+       def topics
+         get_topics
+       end
+
+       private
+
+       def get_topics(filter_topics = nil)
+         md = @internal.metadata(filter_topics, @timeout)
+
+         broker_hash = brokers_from_metadata(md).inject({}) do |h, broker|
+           h[broker.id] = broker
+           h
+         end
+
+         md[:topics].inject({}) do |topic_hash, arr|
+           topic_name, raw_partitions = *arr
+           partitions = raw_partitions.map do |p|
+             leader = broker_hash[p[:leader_id]]
+             all_replicas = p[:replica_ids].map { |i| broker_hash[i] }
+             isr_replicas = p[:isr_ids].map { |i| broker_hash[i] }
+             Partition.new(p[:id], leader, all_replicas, isr_replicas, topic_name)
+           end
+
+           topic_hash[topic_name] = Topic.new(topic_name, partitions)
+           topic_hash
+         end
+       end
+
+       def brokers_from_metadata(md)
+         md[:brokers].map do |h|
+           Broker.new(h[:id], h[:host], h[:port])
+         end
+       end
+     end
+   end
+ end
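Hermann::Discovery::Metadata wraps the raw metadata hash in the Broker/Topic/Partition structs above. A usage sketch (MRI only, per the guard in #initialize; broker address assumed):

require 'hermann/discovery/metadata'

md = Hermann::Discovery::Metadata.new('localhost:9092', :timeout => 2_000)

md.brokers.each { |broker| puts broker }   # Broker#to_s => "host:port"

topic = md.topic('testtopic')
topic.partitions.each do |p|
  puts "partition #{p.id}: leader #{p.leader}, #{p.insync_replicas.size} in-sync"
end

# each Partition can build a consumer pointed at its leader and replicas:
consumer = topic.partitions.first.consumer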
lib/hermann/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Hermann
-   VERSION = '0.24.0'
+   VERSION = '0.24.1'
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: hermann
  version: !ruby/object:Gem::Version
-   version: 0.24.0.0
+   version: 0.24.1.0
  platform: java
  authors:
  - R. Tyler Croy
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-06-15 00:00:00.000000000 Z
+ date: 2015-06-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: json
@@ -60,12 +60,12 @@ dependencies:
      requirements:
      - - ~>
        - !ruby/object:Gem::Version
-         version: 0.1.9
+         version: 0.1.10
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - ~>
        - !ruby/object:Gem::Version
-         version: 0.1.9
+         version: 0.1.10
    prerelease: false
    type: :runtime
  description: Ruby gem for talking to Kafka
@@ -83,6 +83,7 @@ files:
  - ext/hermann/hermann_lib.h
  - lib/hermann.rb
  - lib/hermann/consumer.rb
+ - lib/hermann/discovery/metadata.rb
  - lib/hermann/discovery/zookeeper.rb
  - lib/hermann/errors.rb
  - lib/hermann/java.rb
@@ -112,10 +113,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: '0'
  requirements:
- - jar org.apache.kafka:kafka_2.10, ~>0.8.1.1, ['junit:junit']
+ - jar org.apache.kafka:kafka_2.10, ~>0.8.1.1
  - jar log4j:log4j, ~>1.2.16
  rubyforge_project:
- rubygems_version: 2.4.5
+ rubygems_version: 2.4.8
  signing_key:
  specification_version: 3
  summary: A Kafka consumer/producer gem supporting both MRI and JRuby