poseidon 0.0.1

Files changed (68)
  1. data/.gitignore +19 -0
  2. data/.rspec +2 -0
  3. data/.travis.yml +12 -0
  4. data/.yardopts +8 -0
  5. data/Gemfile +13 -0
  6. data/LICENSE.txt +22 -0
  7. data/README.md +71 -0
  8. data/Rakefile +17 -0
  9. data/TODO.md +27 -0
  10. data/examples/consumer.rb +18 -0
  11. data/examples/producer.rb +9 -0
  12. data/lib/poseidon/broker_pool.rb +72 -0
  13. data/lib/poseidon/cluster_metadata.rb +63 -0
  14. data/lib/poseidon/compressed_value.rb +29 -0
  15. data/lib/poseidon/compression/gzip_codec.rb +23 -0
  16. data/lib/poseidon/compression/snappy_codec.rb +17 -0
  17. data/lib/poseidon/compression.rb +30 -0
  18. data/lib/poseidon/connection.rb +138 -0
  19. data/lib/poseidon/fetched_message.rb +37 -0
  20. data/lib/poseidon/message.rb +151 -0
  21. data/lib/poseidon/message_conductor.rb +84 -0
  22. data/lib/poseidon/message_set.rb +80 -0
  23. data/lib/poseidon/message_to_send.rb +33 -0
  24. data/lib/poseidon/messages_for_broker.rb +39 -0
  25. data/lib/poseidon/messages_to_send.rb +47 -0
  26. data/lib/poseidon/messages_to_send_batch.rb +27 -0
  27. data/lib/poseidon/partition_consumer.rb +154 -0
  28. data/lib/poseidon/producer.rb +193 -0
  29. data/lib/poseidon/producer_compression_config.rb +36 -0
  30. data/lib/poseidon/protocol/protocol_struct.rb +238 -0
  31. data/lib/poseidon/protocol/request_buffer.rb +78 -0
  32. data/lib/poseidon/protocol/response_buffer.rb +72 -0
  33. data/lib/poseidon/protocol.rb +122 -0
  34. data/lib/poseidon/sync_producer.rb +117 -0
  35. data/lib/poseidon/topic_metadata.rb +65 -0
  36. data/lib/poseidon/version.rb +4 -0
  37. data/lib/poseidon.rb +102 -0
  38. data/poseidon.gemspec +24 -0
  39. data/spec/bin/kafka-run-class.sh +65 -0
  40. data/spec/integration/multiple_brokers/round_robin_spec.rb +39 -0
  41. data/spec/integration/multiple_brokers/spec_helper.rb +34 -0
  42. data/spec/integration/simple/compression_spec.rb +20 -0
  43. data/spec/integration/simple/connection_spec.rb +33 -0
  44. data/spec/integration/simple/multiple_brokers_spec.rb +8 -0
  45. data/spec/integration/simple/simple_producer_and_consumer_spec.rb +97 -0
  46. data/spec/integration/simple/spec_helper.rb +17 -0
  47. data/spec/integration/simple/unavailable_broker_spec.rb +77 -0
  48. data/spec/spec_helper.rb +32 -0
  49. data/spec/test_cluster.rb +205 -0
  50. data/spec/unit/broker_pool_spec.rb +77 -0
  51. data/spec/unit/cluster_metadata_spec.rb +41 -0
  52. data/spec/unit/compression_spec.rb +17 -0
  53. data/spec/unit/connection_spec.rb +4 -0
  54. data/spec/unit/fetched_message_spec.rb +11 -0
  55. data/spec/unit/message_conductor_spec.rb +147 -0
  56. data/spec/unit/message_set_spec.rb +42 -0
  57. data/spec/unit/message_spec.rb +112 -0
  58. data/spec/unit/message_to_send_spec.rb +10 -0
  59. data/spec/unit/messages_for_broker_spec.rb +54 -0
  60. data/spec/unit/messages_to_send_batch_spec.rb +25 -0
  61. data/spec/unit/messages_to_send_spec.rb +63 -0
  62. data/spec/unit/partition_consumer_spec.rb +124 -0
  63. data/spec/unit/producer_compression_config_spec.rb +35 -0
  64. data/spec/unit/producer_spec.rb +45 -0
  65. data/spec/unit/protocol_spec.rb +54 -0
  66. data/spec/unit/sync_producer_spec.rb +141 -0
  67. data/spec/unit/topic_metadata_spec.rb +17 -0
  68. metadata +206 -0
data/.gitignore ADDED
@@ -0,0 +1,19 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ test/tmp
+ test/version_tmp
+ tmp
+ *.log
+ *.log.*
data/.rspec ADDED
@@ -0,0 +1,2 @@
+ --color
+ --format progress
data/.travis.yml ADDED
@@ -0,0 +1,12 @@
+ language: ruby
+ rvm:
+   - 1.9.3
+   - 2.0.0
+   - ruby-head
+   - jruby-19mode
+   - jruby-head
+   - rbx-19mode
+ matrix:
+   allow_failures:
+     - rvm: ruby-head
+     - rvm: rbx-19mode
data/.yardopts ADDED
@@ -0,0 +1,8 @@
+ --markup-provider=redcarpet
+ --markup=markdown
+ --no-private
+ --files LICENSE.txt
+ --exclude '~$'
+ --title 'Poseidon (Kafka library for Ruby)'
+ --api public
+ --readme README.md
data/Gemfile ADDED
@@ -0,0 +1,13 @@
+ source 'https://rubygems.org'
+
+ gem 'rake'
+
+ # Specify your gem's dependencies in poseidon.gemspec
+ gemspec
+
+ gem 'coveralls', require: false
+
+ group :development do
+   gem 'github-markup', :platform => :ruby
+   gem 'redcarpet', :platform => :ruby
+ end
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2013 Bob Potter
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,71 @@
+ # Poseidon [![Build Status](https://travis-ci.org/bpot/poseidon.png?branch=master)](https://travis-ci.org/bpot/poseidon) [![Code Climate](https://codeclimate.com/github/bpot/poseidon.png)](https://codeclimate.com/github/bpot/poseidon)
+
+ Poseidon is a Kafka client. Poseidon only supports the 0.8 API and above.
+
+ **Until 1.0.0 this should be considered ALPHA software and not necessarily production ready.**
+
+ * [API Documentation](http://rubydoc.info/github/bpot/poseidon)
+
+ ## Usage
+
+ ### Installing a Kafka broker locally
+
+ Follow the [instructions](https://cwiki.apache.org/KAFKA/kafka-08-quick-start.html) on the Kafka wiki to build Kafka 0.8 and get a test broker up and running.
+
+ ### Sending messages to Kafka
+
+ ```ruby
+ require 'poseidon'
+
+ producer = Poseidon::Producer.new(["localhost:9092"], "my_test_producer")
+
+ messages = []
+ messages << Poseidon::MessageToSend.new("topic1", "value1")
+ messages << Poseidon::MessageToSend.new("topic2", "value2")
+ producer.send_messages(messages)
+ ```
+
+ More detailed [Poseidon::Producer](http://rubydoc.info/github/bpot/poseidon/Poseidon/Producer) documentation.
+
+
+ ### Fetching messages from Kafka
+
+ ```ruby
+ require 'poseidon'
+
+ consumer = Poseidon::PartitionConsumer.new("my_test_consumer", "localhost", 9092,
+                                            "topic1", 0, :earliest_offset)
+
+ loop do
+   messages = consumer.fetch
+   messages.each do |m|
+     puts m.value
+   end
+ end
+ ```
+
+ More detailed [Poseidon::PartitionConsumer](http://rubydoc.info/github/bpot/poseidon/Poseidon/PartitionConsumer) documentation.
+
+ ## Semantic Versioning
+
+ This gem follows [SemVer](http://semver.org). In particular, the public API should not be considered stable and anything may change without warning until Version 1.0.0. Additionally, for the purposes of versioning, the public API is everything documented in the [public API docs](http://rubydoc.info/github/bpot/poseidon).
+
+ ## Requirements
+
+ * Ruby 1.9.3 or higher (1.9.2 and below are not supported)
+ * Kafka 0.8 or higher
+
+ ## Integration Tests
+
+ In order to run integration tests you must specify a `KAFKA_PATH` environment variable which points to a built Kafka installation. There are more detailed [instructions](https://cwiki.apache.org/KAFKA/kafka-08-quick-start.html) on the Kafka wiki, but the following should allow you to run the integration tests.
+
+     # cd ~/src/
+     # git clone https://git-wip-us.apache.org/repos/asf/kafka.git
+     # git checkout -b 0.8 remotes/origin/0.8
+     # ./sbt update
+     # ./sbt package
+     # ./sbt assembly-package-dependency
+     # cd ~/src/poseidon/
+     # KAFKA_PATH=~/src/kafka rake spec:integration:simple
+
+ The poseidon test suite will take care of spinning up and down the broker(s) needed for the integration tests.
data/Rakefile ADDED
@@ -0,0 +1,17 @@
+ require 'bundler/gem_tasks'
+ require 'rspec/core/rake_task'
+
+ RSpec::Core::RakeTask.new("spec:unit") do |t|
+   t.pattern = 'spec/unit/*_spec.rb'
+ end
+
+ RSpec::Core::RakeTask.new('spec:integration:simple') do |t|
+   t.pattern = 'spec/integration/simple/*_spec.rb'
+ end
+
+ RSpec::Core::RakeTask.new('spec:integration:multiple_brokers') do |t|
+   t.pattern = 'spec/integration/multiple_brokers/*_spec.rb'
+ end
+
+ task :spec => 'spec:unit'
+ task :default => 'spec:unit'
data/TODO.md ADDED
@@ -0,0 +1,27 @@
+ ### 0.0.1
+ * Ensure that protocol errors are being handled correctly and not bubbling up
+ * More integration tests: replication, leader changes, etc. Investigate interesting cases in Kafka's tests
+ * End-to-end integration specs
+   - In specs that test broker failure, verify that messages were actually sent/not sent with a consumer.
+
+ * AsyncProducer
+   - Implement a bounded queue, sending thread, etc.
+ * Cleanup: extract protocol struct delegation to a module.
+ * When failing to send messages in the sync producer, return the messages that failed to send?
+
+ ### 0.0.2
+
+ * New Consumer/Consumer Enhancements
+   - Automatically partition work among consumers (zookeeper, redis, pluggable?)
+   - Handle the case where the offset we're trying to read from no longer exists
+
+ * Snappy Compression
+   - snappy: c-ext, would like to avoid
+   - snappy_ffi: ffi interface, but needs to be updated (pre c-api)
+     and has no specs or docs. Also linked to a c-ext version, two gems, etc.
+   - new snappy ffi library with specs, docs, etc. Shave that yak!
+
+ * Benchmark/Profiling. KGIO?
+
+ ### 0.0.3 -- Targets Kafka 0.8.1
+ - Offset API
data/examples/consumer.rb ADDED
@@ -0,0 +1,18 @@
+ $:.unshift File.expand_path(File.dirname(__FILE__) + '/../lib')
+ require 'poseidon'
+
+ consumer = Poseidon::PartitionConsumer.new("example_consumer", "localhost", 9092,
+                                            "example", 0, :earliest_offset)
+
+ loop do
+   begin
+     messages = consumer.fetch
+     messages.each do |m|
+       puts "Received message: #{m.value}"
+     end
+   rescue Poseidon::Errors::UnknownTopicOrPartition
+     puts "Topic does not exist yet"
+   end
+
+   sleep 1
+ end
data/examples/producer.rb ADDED
@@ -0,0 +1,9 @@
+ $:.unshift File.expand_path(File.dirname(__FILE__) + '/../lib')
+ require 'poseidon'
+
+ producer = Poseidon::Producer.new(["localhost:9092"], "example_producer")
+
+ loop do
+   producer.send_messages([Poseidon::MessageToSend.new("example", Time.now.to_s)])
+   sleep 1
+ end
data/lib/poseidon/broker_pool.rb ADDED
@@ -0,0 +1,72 @@
+ module Poseidon
+   # BrokerPool allows you to send API calls to a broker's Connection.
+   #
+   # @api private
+   class BrokerPool
+     class UnknownBroker < StandardError; end
+
+     # @param [String] client_id
+     def initialize(client_id, seed_brokers)
+       @connections = {}
+       @brokers = {}
+       @client_id = client_id
+       @seed_brokers = seed_brokers
+     end
+
+     def fetch_metadata(topics)
+       @seed_brokers.each do |broker|
+         if metadata = fetch_metadata_from_broker(broker, topics)
+           return metadata
+         end
+       end
+       raise Errors::UnableToFetchMetadata
+     end
+
+     # Update the brokers we know about
+     #
+     # TODO break connection when a broker's info changes?
+     #
+     # @param [Hash<Integer,Hash>] brokers
+     #   Hash of broker_id => { :host => host, :port => port }
+     def update_known_brokers(brokers)
+       @brokers.update(brokers)
+       nil
+     end
+
+     # Executes an api call on the connection
+     #
+     # @param [Integer] broker_id id of the broker we want to execute it on
+     # @param [Symbol] api_call
+     #   the api call we want to execute (:produce, :fetch, etc.)
+     def execute_api_call(broker_id, api_call, *args)
+       connection(broker_id).send(api_call, *args)
+     end
+
+     # Closes all open connections to brokers
+     def shutdown
+       @connections.each_value(&:close)
+       @connections = {}
+     end
+
+     private
+     def fetch_metadata_from_broker(broker, topics)
+       host, port = broker.split(":")
+       c = Connection.new(host, port, @client_id)
+       c.topic_metadata(topics)
+     rescue Connection::ConnectionFailedError
+       return nil
+     end
+
+     def connection(broker_id)
+       @connections[broker_id] ||= new_connection(broker_id)
+     end
+
+     def new_connection(broker_id)
+       info = @brokers[broker_id]
+       if info.nil?
+         raise UnknownBroker
+       end
+       Connection.new(info[:host], info[:port], @client_id)
+     end
+   end
+ end
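A minimal sketch of how `BrokerPool` is meant to be driven, assuming a broker is reachable at `localhost:9092` (the seed list, topic, and broker id here are illustrative, not part of the gem):

```ruby
require 'poseidon'

pool = Poseidon::BrokerPool.new("example_client", ["localhost:9092"])

# Walks the seed brokers and returns the first metadata response.
metadata = pool.fetch_metadata(["topic1"])

# Record broker addresses so connections can be opened by broker id.
pool.update_known_brokers({ 0 => { :host => "localhost", :port => 9092 } })

# Dispatch an API call to broker 0's connection.
pool.execute_api_call(0, :topic_metadata, ["topic1"])

pool.shutdown
```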
data/lib/poseidon/cluster_metadata.rb ADDED
@@ -0,0 +1,63 @@
+ module Poseidon
+   # Encapsulates what we know about brokers, topics and partitions
+   # from Metadata API calls.
+   #
+   # @api private
+   class ClusterMetadata
+     attr_reader :brokers, :last_refreshed_at, :topic_metadata
+     def initialize
+       @brokers = {}
+       @topic_metadata = {}
+       @last_refreshed_at = nil
+     end
+
+     # Update what we know about the cluster based on a MetadataResponse
+     #
+     # @param [MetadataResponse] topic_metadata_response
+     # @return nil
+     def update(topic_metadata_response)
+       update_brokers(topic_metadata_response.brokers)
+       update_topics(topic_metadata_response.topics)
+
+       @last_refreshed_at = Time.now
+       nil
+     end
+
+     # Do we have metadata for these topics already?
+     #
+     # @param [Enumerable<String>] topic_names A set of topics.
+     # @return [Boolean] true if we have metadata for all +topic_names+, otherwise false.
+     def have_metadata_for_topics?(topic_names)
+       topic_names.all? { |topic| @topic_metadata[topic] }
+     end
+
+     # Provides metadata for each topic
+     #
+     # @param [Enumerable<String>] topic_names Topics we should return metadata for
+     # @return [Hash<String,TopicMetadata>]
+     def metadata_for_topics(topic_names)
+       Hash[topic_names.map { |name| [name, @topic_metadata[name]] }]
+     end
+
+     # Provides a Broker object for +broker_id+. This corresponds to the
+     # broker ids in the TopicMetadata objects.
+     #
+     # @param [Integer] broker_id Broker id
+     def broker(broker_id)
+       @brokers[broker_id]
+     end
+
+     private
+     def update_topics(topics)
+       topics.each do |topic|
+         @topic_metadata[topic.name] = topic
+       end
+     end
+
+     def update_brokers(brokers)
+       brokers.each do |broker|
+         @brokers[broker.id] = broker
+       end
+     end
+   end
+ end
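A hedged sketch of the intended flow: feed a metadata response into `ClusterMetadata`, then query it (broker id and topic are illustrative):

```ruby
require 'poseidon'

pool = Poseidon::BrokerPool.new("example_client", ["localhost:9092"])
cluster = Poseidon::ClusterMetadata.new

# Populate from a Metadata API response.
cluster.update(pool.fetch_metadata(["topic1"]))

cluster.have_metadata_for_topics?(["topic1"]) # => true
cluster.metadata_for_topics(["topic1"])       # => { "topic1" => <TopicMetadata> }
cluster.broker(0)                             # Broker reported with id 0, or nil
```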
data/lib/poseidon/compressed_value.rb ADDED
@@ -0,0 +1,29 @@
+ module Poseidon
+   # @api private
+   class CompressedValue
+     def initialize(value, codec_id)
+       @value = value
+       @codec_id = codec_id
+     end
+
+     # Decompressed value
+     #
+     # Raises Compression::UnrecognizedCompressionCodec if the compression codec is unknown
+     #
+     # @return [String] decompressed value
+     def decompressed
+       @decompressed ||= decompress
+     end
+
+     def compression_codec
+       Compression.find_codec(@codec_id)
+     end
+
+     private
+
+     # Delegate to the codec module registered for this codec id
+     def decompress
+       compression_codec.decompress(@value)
+     end
+   end
+ end
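A short sketch of `CompressedValue` wrapping a gzip-compressed payload (codec id 1 per the Kafka protocol); the `decompress` delegation above is an assumption about the intended implementation:

```ruby
require 'poseidon'

gzipped = Poseidon::Compression::GzipCodec.compress("hello kafka")

cv = Poseidon::CompressedValue.new(gzipped, 1)
cv.compression_codec # => Poseidon::Compression::GzipCodec
cv.decompressed      # => "hello kafka"
```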
data/lib/poseidon/compression/gzip_codec.rb ADDED
@@ -0,0 +1,23 @@
+ module Poseidon
+   module Compression
+     module GzipCodec
+       def self.codec_id
+         1
+       end
+
+       def self.compress(s)
+         io = StringIO.new
+         io.set_encoding("ASCII-8BIT")
+         gz = Zlib::GzipWriter.new io, Zlib::DEFAULT_COMPRESSION, Zlib::DEFAULT_STRATEGY, :encoding => "ASCII-8BIT"
+         gz.write s
+         gz.close
+         io.string
+       end
+
+       def self.decompress(s)
+         io = StringIO.new(s)
+         Zlib::GzipReader.new(io, :encoding => "ASCII-8BIT").read
+       end
+     end
+   end
+ end
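A quick round-trip check of the codec (assuming `zlib` and `stringio` are loaded, which `require 'poseidon'` is expected to handle):

```ruby
require 'poseidon'

payload = "some message value"
compressed = Poseidon::Compression::GzipCodec.compress(payload)

Poseidon::Compression::GzipCodec.decompress(compressed) == payload # => true
Poseidon::Compression::GzipCodec.codec_id                          # => 1
```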
data/lib/poseidon/compression/snappy_codec.rb ADDED
@@ -0,0 +1,17 @@
+ module Poseidon
+   module Compression
+     module SnappyCodec
+       def self.codec_id
+         2
+       end
+
+       def self.compress(s)
+         raise "Unimplemented"
+       end
+
+       def self.decompress(s)
+         raise "Unimplemented"
+       end
+     end
+   end
+ end
data/lib/poseidon/compression.rb ADDED
@@ -0,0 +1,30 @@
+ module Poseidon
+   # @api private
+   module Compression
+     class UnrecognizedCompressionCodec < StandardError; end
+
+     require "poseidon/compression/gzip_codec"
+     require "poseidon/compression/snappy_codec"
+
+     CODECS = {
+       # 0 => no codec
+       1 => GzipCodec,
+       2 => SnappyCodec
+     }
+
+     # Fetches the codec module for +codec_id+
+     # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Compression
+     #
+     # @param [Integer] codec_id codec id as defined by the Kafka protocol
+     # @return [Module] codec module for codec_id
+     #
+     # @private
+     def self.find_codec(codec_id)
+       codec = CODECS[codec_id]
+       if codec.nil?
+         raise UnrecognizedCompressionCodec, codec_id
+       end
+       codec
+     end
+   end
+ end
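Codec lookup in practice, including the failure mode for an unregistered codec id:

```ruby
require 'poseidon'

Poseidon::Compression.find_codec(1) # => Poseidon::Compression::GzipCodec
Poseidon::Compression.find_codec(2) # => Poseidon::Compression::SnappyCodec

begin
  Poseidon::Compression.find_codec(9) # no codec registered under id 9
rescue Poseidon::Compression::UnrecognizedCompressionCodec => e
  puts "unrecognized codec id: #{e.message}"
end
```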
data/lib/poseidon/connection.rb ADDED
@@ -0,0 +1,138 @@
+ module Poseidon
+   # High level internal interface to a remote broker. Provides access to
+   # the broker API.
+   # @api private
+   class Connection
+     include Protocol
+
+     class ConnectionFailedError < StandardError; end
+
+     API_VERSION = 0
+     REPLICA_ID = -1 # Replica id is always -1 for non-brokers
+
+     attr_reader :host, :port
+
+     # Create a new connection
+     #
+     # @param [String] host Host to connect to
+     # @param [Integer] port Port broker listens on
+     # @param [String] client_id Unique across processes?
+     def initialize(host, port, client_id)
+       @host = host
+       @port = port
+
+       @client_id = client_id
+     end
+
+     # Close broker connection
+     def close
+       @socket && @socket.close
+     end
+
+     # Execute a produce call
+     #
+     # @param [Integer] required_acks
+     # @param [Integer] timeout
+     # @param [Array<Protocol::MessagesForTopics>] messages_for_topics Messages to send
+     # @return [ProduceResponse]
+     def produce(required_acks, timeout, messages_for_topics)
+       ensure_connected
+       req = ProduceRequest.new( request_common(:produce),
+                                 required_acks,
+                                 timeout,
+                                 messages_for_topics)
+       send_request(req)
+       if required_acks != 0
+         read_response(ProduceResponse)
+       else
+         true
+       end
+     end
+
+     # Execute a fetch call
+     #
+     # @param [Integer] max_wait_time
+     # @param [Integer] min_bytes
+     # @param [Array] topic_fetches Fetch requests, grouped by topic
+     def fetch(max_wait_time, min_bytes, topic_fetches)
+       ensure_connected
+       req = FetchRequest.new( request_common(:fetch),
+                               REPLICA_ID,
+                               max_wait_time,
+                               min_bytes,
+                               topic_fetches)
+       send_request(req)
+       read_response(FetchResponse)
+     end
+
+     def offset(offset_topic_requests)
+       ensure_connected
+       req = OffsetRequest.new(request_common(:offset),
+                               REPLICA_ID,
+                               offset_topic_requests)
+       send_request(req)
+       read_response(OffsetResponse).topic_offset_responses
+     end
+
+     # Fetch metadata for +topic_names+
+     #
+     # @param [Enumerable<String>] topic_names
+     #   A list of topics to retrieve metadata for
+     # @return [TopicMetadataResponse] metadata for the topics
+     def topic_metadata(topic_names)
+       ensure_connected
+       req = MetadataRequest.new( request_common(:metadata),
+                                  topic_names)
+       send_request(req)
+       read_response(MetadataResponse)
+     end
+
+     private
+     def ensure_connected
+       if @socket.nil? || @socket.closed?
+         begin
+           @socket = TCPSocket.new(@host, @port)
+         rescue SystemCallError
+           raise ConnectionFailedError
+         end
+       end
+     end
+
+     def read_response(response_class)
+       r = @socket.read(4)
+       if r.nil?
+         raise ConnectionFailedError
+       end
+       n = r.unpack("N").first
+       s = @socket.read(n)
+       buffer = Protocol::ResponseBuffer.new(s)
+       response_class.read(buffer)
+     rescue Errno::ECONNRESET
+       @socket = nil
+       raise ConnectionFailedError
+     end
+
+     def send_request(request)
+       buffer = Protocol::RequestBuffer.new
+       request.write(buffer)
+       @socket.write([buffer.to_s.size].pack("N") + buffer.to_s)
+     rescue Errno::EPIPE
+       @socket = nil
+       raise ConnectionFailedError
+     end
+
+     def request_common(request_type)
+       RequestCommon.new(
+         API_KEYS[request_type],
+         API_VERSION,
+         next_correlation_id,
+         @client_id
+       )
+     end
+
+     def next_correlation_id
+       @correlation_id ||= 0
+       @correlation_id += 1
+     end
+   end
+ end
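A hedged sketch of driving `Connection` directly; it is `@api private`, so normal callers go through `BrokerPool`/`Producer` instead (host, port, and topic are illustrative):

```ruby
require 'poseidon'

conn = Poseidon::Connection.new("localhost", 9092, "example_client")

# Lazily opens the TCP socket, frames the request with a 4-byte
# big-endian length prefix, and reads back a MetadataResponse.
response = conn.topic_metadata(["topic1"])

conn.close
```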
data/lib/poseidon/fetched_message.rb ADDED
@@ -0,0 +1,37 @@
+ module Poseidon
+
+   # A message fetched from a Kafka broker.
+   #
+   # ```
+   # fetched_messages = consumer.fetch
+   # fetched_messages.each do |fm|
+   #   puts "Topic: #{fm.topic}"
+   #   puts "Value: #{fm.value}"
+   #   puts "Key: #{fm.key}"
+   #   puts "Offset: #{fm.offset}"
+   # end
+   # ```
+   #
+   # @param [String] topic
+   #   Topic this message was fetched from.
+   #
+   # @param [String] value
+   #   Value of the message.
+   #
+   # @param [String] key
+   #   Optional. The key the message was produced with;
+   #   producers use keys to route messages to specific
+   #   partitions, falling back to round-robin otherwise.
+   #
+   # @api public
+   class FetchedMessage
+     attr_reader :value, :key, :topic, :offset
+
+     def initialize(topic, value, key, offset)
+       @topic = topic
+       @value = value
+       @key = key
+       @offset = offset
+     end
+   end
+ end