poseidon 0.0.1

Files changed (68)
  1. data/.gitignore +19 -0
  2. data/.rspec +2 -0
  3. data/.travis.yml +12 -0
  4. data/.yardopts +8 -0
  5. data/Gemfile +13 -0
  6. data/LICENSE.txt +22 -0
  7. data/README.md +71 -0
  8. data/Rakefile +17 -0
  9. data/TODO.md +27 -0
  10. data/examples/consumer.rb +18 -0
  11. data/examples/producer.rb +9 -0
  12. data/lib/poseidon/broker_pool.rb +72 -0
  13. data/lib/poseidon/cluster_metadata.rb +63 -0
  14. data/lib/poseidon/compressed_value.rb +23 -0
  15. data/lib/poseidon/compression/gzip_codec.rb +23 -0
  16. data/lib/poseidon/compression/snappy_codec.rb +17 -0
  17. data/lib/poseidon/compression.rb +30 -0
  18. data/lib/poseidon/connection.rb +138 -0
  19. data/lib/poseidon/fetched_message.rb +37 -0
  20. data/lib/poseidon/message.rb +151 -0
  21. data/lib/poseidon/message_conductor.rb +84 -0
  22. data/lib/poseidon/message_set.rb +80 -0
  23. data/lib/poseidon/message_to_send.rb +33 -0
  24. data/lib/poseidon/messages_for_broker.rb +39 -0
  25. data/lib/poseidon/messages_to_send.rb +47 -0
  26. data/lib/poseidon/messages_to_send_batch.rb +27 -0
  27. data/lib/poseidon/partition_consumer.rb +154 -0
  28. data/lib/poseidon/producer.rb +193 -0
  29. data/lib/poseidon/producer_compression_config.rb +36 -0
  30. data/lib/poseidon/protocol/protocol_struct.rb +238 -0
  31. data/lib/poseidon/protocol/request_buffer.rb +78 -0
  32. data/lib/poseidon/protocol/response_buffer.rb +72 -0
  33. data/lib/poseidon/protocol.rb +122 -0
  34. data/lib/poseidon/sync_producer.rb +117 -0
  35. data/lib/poseidon/topic_metadata.rb +65 -0
  36. data/lib/poseidon/version.rb +4 -0
  37. data/lib/poseidon.rb +102 -0
  38. data/poseidon.gemspec +24 -0
  39. data/spec/bin/kafka-run-class.sh +65 -0
  40. data/spec/integration/multiple_brokers/round_robin_spec.rb +39 -0
  41. data/spec/integration/multiple_brokers/spec_helper.rb +34 -0
  42. data/spec/integration/simple/compression_spec.rb +20 -0
  43. data/spec/integration/simple/connection_spec.rb +33 -0
  44. data/spec/integration/simple/multiple_brokers_spec.rb +8 -0
  45. data/spec/integration/simple/simple_producer_and_consumer_spec.rb +97 -0
  46. data/spec/integration/simple/spec_helper.rb +17 -0
  47. data/spec/integration/simple/unavailable_broker_spec.rb +77 -0
  48. data/spec/spec_helper.rb +32 -0
  49. data/spec/test_cluster.rb +205 -0
  50. data/spec/unit/broker_pool_spec.rb +77 -0
  51. data/spec/unit/cluster_metadata_spec.rb +41 -0
  52. data/spec/unit/compression_spec.rb +17 -0
  53. data/spec/unit/connection_spec.rb +4 -0
  54. data/spec/unit/fetched_message_spec.rb +11 -0
  55. data/spec/unit/message_conductor_spec.rb +147 -0
  56. data/spec/unit/message_set_spec.rb +42 -0
  57. data/spec/unit/message_spec.rb +112 -0
  58. data/spec/unit/message_to_send_spec.rb +10 -0
  59. data/spec/unit/messages_for_broker_spec.rb +54 -0
  60. data/spec/unit/messages_to_send_batch_spec.rb +25 -0
  61. data/spec/unit/messages_to_send_spec.rb +63 -0
  62. data/spec/unit/partition_consumer_spec.rb +124 -0
  63. data/spec/unit/producer_compression_config_spec.rb +35 -0
  64. data/spec/unit/producer_spec.rb +45 -0
  65. data/spec/unit/protocol_spec.rb +54 -0
  66. data/spec/unit/sync_producer_spec.rb +141 -0
  67. data/spec/unit/topic_metadata_spec.rb +17 -0
  68. metadata +206 -0
data/lib/poseidon/protocol.rb ADDED
@@ -0,0 +1,122 @@
+ module Poseidon
+   # @api private
+   module Protocol
+     require "poseidon/protocol/protocol_struct"
+     require "poseidon/protocol/request_buffer"
+     require "poseidon/protocol/response_buffer"
+
+     API_KEYS = {
+       :produce  => 0,
+       :fetch    => 1,
+       :offset   => 2,
+       :metadata => 3
+     }
+
+     # Request/Response Common Structures
+     RequestCommon = ProtocolStruct.new(:api_key => :int16,
+                                        :api_version => :int16,
+                                        :correlation_id => :int32,
+                                        :client_id => :string)
+     ResponseCommon = ProtocolStruct.new(:correlation_id => :int32)
+
+     # MessageSet Common Structure
+     MessageStruct = ProtocolStruct.new(:magic_type => :int8,
+                                        :attributes => :int8,
+                                        :key => :bytes,
+                                        :value => :bytes).prepend_size.prepend_crc32.truncatable
+     MessageWithOffsetStruct = ProtocolStruct.new(:offset => :int64,
+                                                  :message => MessageStruct)
+
+     # When part of produce requests or fetch responses, a MessageSet
+     # has a prepended size. When a MessageSet is compressed and
+     # nested in a Message, the size is not prepended.
+     MessageSetStruct = ProtocolStruct.new(:messages => [Message]).
+                          size_bound_array(:messages)
+     MessageSetStructWithSize = MessageSetStruct.dup.prepend_size
+
+     # Produce Request
+     MessagesForPartition = ProtocolStruct.new(:partition => :int32,
+                                               :message_set => MessageSet)
+     MessagesForTopic = ProtocolStruct.new(:topic => :string,
+                                           :messages_for_partitions =>
+                                             [MessagesForPartition])
+     ProduceRequest = ProtocolStruct.new(:common => RequestCommon,
+                                         :required_acks => :int16,
+                                         :timeout => :int32,
+                                         :messages_for_topics => [MessagesForTopic])
+
+     # Produce Response
+     ProducePartitionResponse = ProtocolStruct.new(:partition => :int32,
+                                                   :error => :int16,
+                                                   :offset => :int64)
+     ProduceTopicResponse = ProtocolStruct.new(:topic => :string,
+                                               :partitions => [ProducePartitionResponse])
+     ProduceResponse = ProtocolStruct.new(:common => ResponseCommon,
+                                          :topic_response => [ProduceTopicResponse])
+
+     # Fetch Request
+     PartitionFetch = ProtocolStruct.new(:partition => :int32,
+                                         :fetch_offset => :int64,
+                                         :max_bytes => :int32)
+     TopicFetch = ProtocolStruct.new(:topic => :string,
+                                     :partition_fetches => [PartitionFetch])
+     FetchRequest = ProtocolStruct.new(:common => RequestCommon,
+                                       :replica_id => :int32,
+                                       :max_wait_time => :int32,
+                                       :min_bytes => :int32,
+                                       :topic_fetches => [TopicFetch])
+
+     # Fetch Response
+     PartitionFetchResponse = ProtocolStruct.new(:partition => :int32,
+                                                 :error => :int16,
+                                                 :highwater_mark_offset => :int64,
+                                                 :message_set => MessageSet)
+     TopicFetchResponse = ProtocolStruct.new(:topic => :string,
+                                             :partition_fetch_responses => [PartitionFetchResponse])
+     FetchResponse = ProtocolStruct.new(
+       :common => ResponseCommon,
+       :topic_fetch_responses => [TopicFetchResponse])
+
+     # Offset Request
+     PartitionOffsetRequest = ProtocolStruct.new(:partition => :int32,
+                                                 :time => :int64,
+                                                 :max_number_of_offsets => :int32)
+     TopicOffsetRequest = ProtocolStruct.new(
+       :topic => :string,
+       :partition_offset_requests => [PartitionOffsetRequest])
+     OffsetRequest = ProtocolStruct.new(:common => RequestCommon,
+                                        :replica_id => :int32,
+                                        :topic_offset_requests => [TopicOffsetRequest])
+
+     # Offset Response
+     Offset = ProtocolStruct.new(:offset => :int64)
+     PartitionOffset = ProtocolStruct.new(:partition => :int32,
+                                          :error => :int16,
+                                          :offsets => [Offset])
+     TopicOffsetResponse = ProtocolStruct.new(:topic => :string,
+                                              :partition_offsets => [PartitionOffset])
+     OffsetResponse = ProtocolStruct.new(
+       :common => ResponseCommon,
+       :topic_offset_responses => [TopicOffsetResponse])
+
+     # Metadata Request
+     MetadataRequest = ProtocolStruct.new(:common => RequestCommon,
+                                          :topic_names => [:string])
+
+     # Metadata Response
+     Broker = ProtocolStruct.new(:id => :int32,
+                                 :host => :string,
+                                 :port => :int32)
+     PartitionMetadata = ProtocolStruct.new(:error => :int16,
+                                            :id => :int32,
+                                            :leader => :int32,
+                                            :replicas => [:int32],
+                                            :isr => [:int32])
+     TopicMetadataStruct = ProtocolStruct.new(:error => :int16,
+                                              :name => :string,
+                                              :partitions => [PartitionMetadata])
+     MetadataResponse = ProtocolStruct.new(:common => ResponseCommon,
+                                           :brokers => [Broker],
+                                           :topics => [TopicMetadata])
+   end
+ end
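The generated structs behave like plain Ruby Structs, so requests can be assembled by nesting them positionally, as the integration specs further down do (e.g. `MessageStruct.new(0, 0, nil, "hello")`). A minimal sketch of building a metadata request; the correlation id `1` and client id `"example-client"` are illustrative values, and actual wire encoding is handled by `Connection` via `RequestBuffer`:

    require 'poseidon'
    include Poseidon::Protocol

    # RequestCommon fields: api_key, api_version, correlation_id, client_id.
    common  = RequestCommon.new(API_KEYS[:metadata], 0, 1, "example-client")
    # MetadataRequest fields: common, topic_names.
    request = MetadataRequest.new(common, ["test"])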
data/lib/poseidon/sync_producer.rb ADDED
@@ -0,0 +1,117 @@
+ module Poseidon
+   # Used by +Producer+ for sending messages to the Kafka cluster.
+   #
+   # You should not use this interface directly.
+   #
+   # Fetches metadata at appropriate times.
+   # Builds MessagesToSend.
+   # Handles MessageBatchToSend lifecycle.
+   #
+   # Who is responsible for fetching metadata from broker seed list?
+   # Do we want to be fetching from real live brokers eventually?
+   #
+   # @api private
+   class SyncProducer
+     OPTION_DEFAULTS = {
+       :compression_codec => nil,
+       :compressed_topics => nil,
+       :metadata_refresh_interval_ms => 600_000,
+       :partitioner => nil,
+       :max_send_retries => 3,
+       :retry_backoff_ms => 100,
+       :required_acks => 0,
+       :ack_timeout_ms => 1500,
+     }
+
+     attr_reader :client_id, :retry_backoff_ms, :max_send_retries,
+       :metadata_refresh_interval_ms, :required_acks, :ack_timeout_ms
+     def initialize(client_id, seed_brokers, options = {})
+       @client_id = client_id
+
+       handle_options(options.dup)
+
+       @cluster_metadata = ClusterMetadata.new
+       @message_conductor = MessageConductor.new(@cluster_metadata, @partitioner)
+       @broker_pool = BrokerPool.new(client_id, seed_brokers)
+     end
+
+     def send_messages(messages)
+       messages_to_send = MessagesToSend.new(messages, @cluster_metadata)
+
+       (@max_send_retries+1).times do
+         if messages_to_send.needs_metadata? || refresh_interval_elapsed?
+           refreshed_metadata = refresh_metadata(messages_to_send.topic_set)
+           if !refreshed_metadata
+             # If we can't refresh metadata we have to give up.
+             break
+           end
+         end
+
+         messages_to_send.messages_for_brokers(@message_conductor).each do |messages_for_broker|
+           if send_to_broker(messages_for_broker)
+             messages_to_send.successfully_sent(messages_for_broker)
+           end
+         end
+
+         if messages_to_send.all_sent? || @max_send_retries == 0
+           break
+         else
+           Kernel.sleep retry_backoff_ms / 1000.0
+           refresh_metadata(messages_to_send.topic_set)
+         end
+       end
+
+       messages_to_send.all_sent?
+     end
+
+     def shutdown
+       @broker_pool.shutdown
+     end
+
+     private
+     def handle_options(options)
+       @ack_timeout_ms   = handle_option(options, :ack_timeout_ms)
+       @retry_backoff_ms = handle_option(options, :retry_backoff_ms)
+
+       @metadata_refresh_interval_ms =
+         handle_option(options, :metadata_refresh_interval_ms)
+
+       @required_acks    = handle_option(options, :required_acks)
+       @max_send_retries = handle_option(options, :max_send_retries)
+
+       @compression_config = ProducerCompressionConfig.new(
+         handle_option(options, :compression_codec),
+         handle_option(options, :compressed_topics))
+
+       @partitioner = handle_option(options, :partitioner)
+
+       raise ArgumentError, "Unknown options: #{options.keys.inspect}" if options.keys.any?
+     end
+
+     def handle_option(options, sym)
+       # Check for the key explicitly so a caller-supplied nil or false
+       # isn't silently replaced by the default.
+       options.has_key?(sym) ? options.delete(sym) : OPTION_DEFAULTS[sym]
+     end
+
+     def refresh_interval_elapsed?
+       # Time subtraction yields seconds; the interval option is in ms.
+       (Time.now - @cluster_metadata.last_refreshed_at) >
+         (metadata_refresh_interval_ms / 1000.0)
+     end
+
+     def refresh_metadata(topics)
+       @cluster_metadata.update(@broker_pool.fetch_metadata(topics))
+       @broker_pool.update_known_brokers(@cluster_metadata.brokers)
+       true
+     rescue Errors::UnableToFetchMetadata
+       false
+     end
+
+     def send_to_broker(messages_for_broker)
+       return false if messages_for_broker.broker_id == -1
+       to_send = messages_for_broker.build_protocol_objects(@compression_config)
+       @broker_pool.execute_api_call(messages_for_broker.broker_id, :produce,
+                                     required_acks, ack_timeout_ms,
+                                     to_send)
+     rescue Connection::ConnectionFailedError
+       false
+     end
+   end
+ end
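SyncProducer is marked `@api private`; the supported entry point is `Producer`, which forwards these same options. A minimal sketch based on the integration specs further down (the broker address and topic name are placeholders):

    require 'poseidon'
    include Poseidon

    # Producer wraps SyncProducer; :required_acks maps onto the
    # OPTION_DEFAULTS table above.
    producer = Producer.new(["localhost:9092"], "example_client",
                            :type => :sync,
                            :required_acks => 1)

    ok = producer.send_messages([MessageToSend.new("example_topic", "hello")])
    puts ok # => true when every message was accepted by a broker
    producer.shutdown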
data/lib/poseidon/topic_metadata.rb ADDED
@@ -0,0 +1,65 @@
+ module Poseidon
+   # @api private
+   class TopicMetadata
+     # Build a new TopicMetadata object from its binary representation
+     #
+     # @param [ResponseBuffer] buffer
+     # @return [TopicMetadata]
+     #
+     def self.read(buffer)
+       tm = TopicMetadata.new
+       tm.struct = Protocol::TopicMetadataStruct.read(buffer)
+       tm
+     end
+
+     attr_accessor :struct
+     def initialize(struct=nil)
+       self.struct = struct
+     end
+
+     # Write a binary representation of the TopicMetadata to buffer
+     #
+     # @param [RequestBuffer] buffer
+     # @return [nil]
+     def write(buffer)
+       struct.write(buffer)
+       nil
+     end
+
+     def partitions
+       struct.partitions
+     end
+
+     def name
+       struct.name
+     end
+
+     def ==(o)
+       eql?(o)
+     end
+
+     def eql?(o)
+       struct.eql?(o.struct)
+     end
+
+     def objects_with_errors
+       struct.objects_with_errors
+     end
+
+     def leader_available?
+       struct.error_class != Errors::LeaderNotAvailable
+     end
+
+     def partition_count
+       @partition_count ||= struct.partitions.count
+     end
+
+     def available_partition_leader_ids
+       # A leader id of -1 means no leader is available; select(&:leader)
+       # would keep every partition, since any integer is truthy in Ruby.
+       @available_partition_leader_ids ||=
+         struct.partitions.map(&:leader).reject { |id| id == -1 }
+     end
+
+     def available_partition_count
+       @available_partition_count ||= available_partition_leader_ids.count
+     end
+   end
+ end
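These helpers wrap the `TopicMetadataStruct` fields that the round-robin integration spec below walks by hand. A small sketch of inspecting a topic's leaders; the broker address and the `"test"` topic are assumptions for illustration:

    require 'poseidon'
    include Poseidon

    # Ask a broker for metadata and summarize partition leadership.
    connection = Connection.new("localhost", 9092, "metadata_fetcher")
    metadata   = connection.topic_metadata(["test"])

    topic = metadata.topics.first
    puts "#{topic.name}: #{topic.available_partition_count} of " \
         "#{topic.partition_count} partitions have a leader"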
data/lib/poseidon/version.rb ADDED
@@ -0,0 +1,4 @@
+ module Poseidon
+   # Unstable! API May Change!
+   VERSION = "0.0.1"
+ end
data/lib/poseidon.rb ADDED
@@ -0,0 +1,102 @@
+ # Stdlib requires
+ require 'socket'
+ require 'zlib'
+ require 'thread'
+ require 'set'
+
+ # Public API
+ require "poseidon/message_to_send"
+ require "poseidon/producer"
+ require "poseidon/fetched_message"
+ require "poseidon/partition_consumer"
+
+ # Poseidon!
+ require "poseidon/message"
+ require "poseidon/message_set"
+ require "poseidon/topic_metadata"
+ require "poseidon/protocol"
+
+ require "poseidon/broker_pool"
+ require "poseidon/cluster_metadata"
+ require "poseidon/compression"
+ require "poseidon/connection"
+ require "poseidon/message_conductor"
+ require "poseidon/messages_for_broker"
+ require "poseidon/messages_to_send"
+ require "poseidon/messages_to_send_batch"
+ require "poseidon/producer_compression_config"
+ require "poseidon/sync_producer"
+ require "poseidon/version"
+
+ # Top level Poseidon namespace
+ #
+ # @api public
+ module Poseidon
+   # Poseidon exception namespace
+   module Errors
+     # @api private
+     class ProtocolError < StandardError; end
+
+     # Protocol Exceptions
+     #
+     # These are defined by the Kafka wire format;
+     # they should be caught before being raised to users.
+     #
+     # @api private
+     class UnknownError < ProtocolError; end
+     # @api private
+     class OffsetOutOfRange < ProtocolError; end
+     # @api private
+     class InvalidMessage < ProtocolError; end
+     # @api private
+     class UnknownTopicOrPartition < ProtocolError; end
+     # @api private
+     class InvalidMessageSize < ProtocolError; end
+     # @api private
+     class LeaderNotAvailable < ProtocolError; end
+     # @api private
+     class NotLeaderForPartition < ProtocolError; end
+     # @api private
+     class RequestTimedOut < ProtocolError; end
+     # @api private
+     class BrokerNotAvailable < ProtocolError; end
+     # @api private
+     class ReplicaNotAvailable < ProtocolError; end
+     # @api private
+     class MessageSizeTooLarge < ProtocolError; end
+     # @api private
+     class UnrecognizedProtocolError < ProtocolError; end
+
+     # @api private
+     NO_ERROR_CODE = 0
+     # @api private
+     ERROR_CODES = {
+       -1 => UnknownError,
+       1  => OffsetOutOfRange,
+       2  => InvalidMessage,
+       3  => UnknownTopicOrPartition,
+       4  => InvalidMessageSize,
+       5  => LeaderNotAvailable,
+       6  => NotLeaderForPartition,
+       7  => RequestTimedOut,
+       8  => BrokerNotAvailable,
+       9  => ReplicaNotAvailable,
+       10 => MessageSizeTooLarge
+     }
+
+     # Raised when a custom partitioner tries to send
+     # a message to a partition that doesn't exist.
+     class InvalidPartitionError < StandardError; end
+
+     # Raised when we are unable to fetch metadata from
+     # any of the brokers.
+     class UnableToFetchMetadata < StandardError; end
+
+     # Raised when a message's checksum doesn't match
+     class ChecksumError < StandardError; end
+
+     # Raised when you try to send messages to a producer
+     # object that has been #shutdown
+     class ProducerShutdownError < StandardError; end
+   end
+ end
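A sketch of how a response error code can be translated into one of these exceptions. The `raise_on_error` helper is hypothetical (the gem's own response handling lives in the protocol structs); it only uses the `NO_ERROR_CODE` and `ERROR_CODES` constants defined above:

    require 'poseidon'

    # Map a Kafka error code from a response to the corresponding
    # Poseidon::Errors exception class and raise it.
    def raise_on_error(error_code)
      return if error_code == Poseidon::Errors::NO_ERROR_CODE

      # Codes outside the table fall back to UnrecognizedProtocolError.
      error_class = Poseidon::Errors::ERROR_CODES.fetch(
        error_code, Poseidon::Errors::UnrecognizedProtocolError)
      raise error_class
    end

    raise_on_error(0) # => nil (no error)
    raise_on_error(5) # => raises Poseidon::Errors::LeaderNotAvailable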
data/poseidon.gemspec ADDED
@@ -0,0 +1,24 @@
+ # -*- encoding: utf-8 -*-
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'poseidon/version'
+
+ Gem::Specification.new do |gem|
+   gem.name          = "poseidon"
+   gem.version       = Poseidon::VERSION
+   gem.authors       = ["Bob Potter"]
+   gem.email         = ["bobby.potter@gmail.com"]
+   gem.description   = %q{A Kafka (http://kafka.apache.org/) producer and consumer}
+   gem.summary       = %q{Poseidon is a producer and consumer implementation for Kafka >= 0.8}
+   gem.homepage      = "https://github.com/bpot/poseidon"
+
+   gem.files         = `git ls-files`.split($/)
+   gem.executables   = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
+   gem.test_files    = gem.files.grep(%r{^(test|spec|features)/})
+   gem.require_paths = ["lib"]
+
+   gem.add_development_dependency(%q<rspec>)
+   gem.add_development_dependency(%q<yard>)
+   gem.add_development_dependency(%q<simplecov>)
+   gem.add_development_dependency(%q<daemon_controller>)
+ end
data/spec/bin/kafka-run-class.sh ADDED
@@ -0,0 +1,65 @@
+ #!/bin/bash
+ # Licensed to the Apache Software Foundation (ASF) under one or more
+ # contributor license agreements. See the NOTICE file distributed with
+ # this work for additional information regarding copyright ownership.
+ # The ASF licenses this file to You under the Apache License, Version 2.0
+ # (the "License"); you may not use this file except in compliance with
+ # the License. You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ if [ $# -lt 1 ];
+ then
+   echo "USAGE: $0 classname [opts]"
+   exit 1
+ fi
+
+ SCALA_VERSION=2.8.0
+
+ # assume all dependencies have been packaged into one jar with sbt-assembly's task "assembly-package-dependency"
+ for file in $KAFKA_PATH/core/target/scala-${SCALA_VERSION}/*.jar;
+ do
+   CLASSPATH=$CLASSPATH:$file
+ done
+
+ for file in $KAFKA_PATH/perf/target/scala-${SCALA_VERSION}/kafka*.jar;
+ do
+   CLASSPATH=$CLASSPATH:$file
+ done
+
+ # classpath addition for release
+ for file in $KAFKA_PATH/libs/*.jar;
+ do
+   CLASSPATH=$CLASSPATH:$file
+ done
+
+ for file in $KAFKA_PATH/kafka*.jar;
+ do
+   CLASSPATH=$CLASSPATH:$file
+ done
+
+ if [ -z "$KAFKA_JMX_OPTS" ]; then
+   KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
+ fi
+
+ if [ -z "$KAFKA_OPTS" ]; then
+   KAFKA_OPTS="-Xmx512M -server -Dlog4j.configuration=file:$KAFKA_PATH/config/log4j.properties"
+ fi
+
+ if [ -n "$JMX_PORT" ]; then
+   KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
+ fi
+
+ if [ -z "$JAVA_HOME" ]; then
+   JAVA="java"
+ else
+   JAVA="$JAVA_HOME/bin/java"
+ fi
+
+ exec $JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH "$@"
data/spec/integration/multiple_brokers/round_robin_spec.rb ADDED
@@ -0,0 +1,39 @@
+ require 'integration/multiple_brokers/spec_helper'
+
+ describe "round robin sending" do
+   describe "with small message batches" do
+     it "evenly distributes messages across brokers" do
+       c = Connection.new("localhost", 9092, "metadata_fetcher")
+       md = c.topic_metadata(["test"])
+       sleep 1
+       md = c.topic_metadata(["test"])
+
+       test_topic = md.topics.first
+
+       consumers = test_topic.partitions.map do |partition|
+         leader_id = partition.leader
+         broker = md.brokers.find { |b| b.id == leader_id }
+         PartitionConsumer.new("test_consumer_#{partition.id}", broker.host,
+                               broker.port, "test", partition.id, -1)
+       end
+
+       # Update offsets to current position before adding test messages
+       consumers.each do |c|
+         c.fetch
+       end
+
+       @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "test",
+                         :required_acks => 1)
+
+       # 24 messages round-robined across 3 partitions => 8 per partition
+       24.times do
+         @p.send_messages([MessageToSend.new("test", "hello")])
+       end
+
+       consumers.each do |c|
+         messages = c.fetch
+         expect(messages.size).to eq(8)
+       end
+     end
+   end
+ end
data/spec/integration/multiple_brokers/spec_helper.rb ADDED
@@ -0,0 +1,34 @@
+ require 'spec_helper'
+
+ require 'test_cluster'
+
+ class ThreeBrokerCluster
+   def initialize
+     @zookeeper = ZookeeperRunner.new
+     @brokers = (9092..9094).map { |port| BrokerRunner.new(port - 9092, port, 3) }
+   end
+
+   def start
+     @zookeeper.start
+     @brokers.each(&:start)
+   end
+
+   def stop
+     @zookeeper.stop
+     @brokers.each(&:stop)
+   end
+ end
+
+ RSpec.configure do |config|
+   config.before(:suite) do
+     JavaRunner.remove_tmp
+     JavaRunner.set_kafka_path!
+     $tc = ThreeBrokerCluster.new
+     $tc.start
+     sleep 5 # wait for cluster to come up
+   end
+
+   config.after(:suite) do
+     $tc.stop
+   end
+ end
data/spec/integration/simple/compression_spec.rb ADDED
@@ -0,0 +1,20 @@
+ require 'integration/simple/spec_helper'
+
+ describe "compression" do
+   it "roundtrips" do
+     i = rand(1000)
+
+     @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
+                                       "test12", 0, -2)
+
+     @producer = Producer.new(["localhost:9092"],
+                              "test_client",
+                              :type => :sync,
+                              :compression_codec => :gzip)
+     messages = [MessageToSend.new("test12", "Hello World: #{i}")]
+
+     expect(@producer.send_messages(messages)).to eq(true)
+     messages = @consumer.fetch
+     expect(messages.last.value).to eq("Hello World: #{i}")
+   end
+ end
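The `-2` passed as the final `PartitionConsumer` argument appears to follow Kafka's offset-request convention, where `-2` means the earliest available offset and `-1` the latest (the round-robin spec above uses `-1` to skip to the current position). A small consume-loop sketch under that assumption; host, port, and topic are placeholders:

    require 'poseidon'
    include Poseidon

    # Read a topic partition from the earliest available offset.
    consumer = PartitionConsumer.new("example_consumer", "localhost", 9092,
                                     "test12", 0, -2)

    consumer.fetch.each do |message|
      puts message.value
    end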
data/spec/integration/simple/connection_spec.rb ADDED
@@ -0,0 +1,33 @@
+ require 'integration/simple/spec_helper'
+
+ include Protocol
+ describe Connection do
+   before(:each) do
+     @connection = Connection.new("localhost", 9092, "test")
+   end
+
+   it 'sends and parses topic metadata requests' do
+     @connection.topic_metadata(["test2"])
+   end
+
+   it 'sends and parses produce requests' do
+     message = MessageStruct.new(0, 0, nil, "hello")
+     message_with_offset = MessageWithOffsetStruct.new(0, message)
+     message_set = MessageSetStruct.new([message_with_offset])
+     messages_for_partitions = [MessagesForPartition.new(0, message_set)]
+     messages_for_topics = [MessagesForTopic.new("test2", messages_for_partitions)]
+     @connection.produce(1, 10_000, messages_for_topics)
+   end
+
+   it 'sends and parses fetch requests' do
+     partition_fetches = [PartitionFetch.new(0, 0, 1024*1024)]
+     topic_fetches = [TopicFetch.new("test2", partition_fetches)]
+     @connection.fetch(1000, 0, topic_fetches)
+   end
+
+   it 'sends and parses offset requests' do
+     partition_offset_requests = [PartitionOffsetRequest.new(0, -1, 1000)]
+     offset_topic_requests = [TopicOffsetRequest.new("test2", partition_offset_requests)]
+     @connection.offset(offset_topic_requests)
+   end
+ end
data/spec/integration/simple/multiple_brokers_spec.rb ADDED
@@ -0,0 +1,8 @@
+ require 'integration/simple/spec_helper'
+
+ describe "three brokers in cluster" do
+   describe "sending batches of 1 message" do
+     it "sends messages to all brokers" do
+     end
+   end
+ end