poseidon 0.0.4 → 0.0.5.pre1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/.travis.yml +2 -0
  4. data/CHANGES.md +4 -0
  5. data/README.md +4 -9
  6. data/Rakefile +3 -0
  7. data/lib/poseidon.rb +41 -24
  8. data/lib/poseidon/broker_pool.rb +7 -3
  9. data/lib/poseidon/cluster_metadata.rb +17 -1
  10. data/lib/poseidon/connection.rb +33 -11
  11. data/lib/poseidon/message_conductor.rb +2 -2
  12. data/lib/poseidon/messages_for_broker.rb +17 -0
  13. data/lib/poseidon/messages_to_send.rb +4 -4
  14. data/lib/poseidon/partition_consumer.rb +67 -24
  15. data/lib/poseidon/producer.rb +4 -1
  16. data/lib/poseidon/protocol/request_buffer.rb +12 -4
  17. data/lib/poseidon/sync_producer.rb +55 -22
  18. data/lib/poseidon/topic_metadata.rb +23 -8
  19. data/lib/poseidon/version.rb +1 -1
  20. data/log/.gitkeep +0 -0
  21. data/poseidon.gemspec +2 -2
  22. data/spec/integration/multiple_brokers/consumer_spec.rb +1 -1
  23. data/spec/integration/multiple_brokers/metadata_failures_spec.rb +35 -0
  24. data/spec/integration/multiple_brokers/rebalance_spec.rb +67 -0
  25. data/spec/integration/multiple_brokers/round_robin_spec.rb +4 -4
  26. data/spec/integration/multiple_brokers/spec_helper.rb +29 -7
  27. data/spec/integration/simple/compression_spec.rb +1 -0
  28. data/spec/integration/simple/connection_spec.rb +1 -1
  29. data/spec/integration/simple/simple_producer_and_consumer_spec.rb +25 -2
  30. data/spec/integration/simple/spec_helper.rb +2 -2
  31. data/spec/integration/simple/truncated_messages_spec.rb +1 -1
  32. data/spec/integration/simple/unavailable_broker_spec.rb +9 -16
  33. data/spec/spec_helper.rb +3 -0
  34. data/spec/test_cluster.rb +51 -48
  35. data/spec/unit/broker_pool_spec.rb +28 -7
  36. data/spec/unit/cluster_metadata_spec.rb +3 -3
  37. data/spec/unit/message_conductor_spec.rb +27 -14
  38. data/spec/unit/messages_to_send_spec.rb +3 -3
  39. data/spec/unit/partition_consumer_spec.rb +28 -10
  40. data/spec/unit/sync_producer_spec.rb +16 -12
  41. metadata +24 -35
  42. data/spec/bin/kafka-run-class.sh +0 -65
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 4c4852d5b615cd1fe50528b0136602756087d0db
4
- data.tar.gz: 2172c5cd70c8047cb0b0bdd171d5f9ed5cb82b73
3
+ metadata.gz: 6b96a319eb67baf9aa7081d2c2800cf9d24eb672
4
+ data.tar.gz: fb213c7eb1e31de507c3160bdd6fc6a4fd04084c
5
5
  SHA512:
6
- metadata.gz: 3949ab96ce033255e31bc588db0f0895c6a72a91d9bfb56137995f0d5f52bc467dfa660359d440fddca89fe64fd3f6af7a2b28f2476698ba35319f999358a721
7
- data.tar.gz: 2348e329e244a3b4a777e8635143d33ec591c10ee411f40aa69a629487643585e8b303c13f120d51e85a2dde13f5dea5b0cb3290b152e619d3fa07dc6f4769d4
6
+ metadata.gz: 1f6daff2a688fafccd154005ef4a3c99c1cb0b3b4cadaf3776695b49607b25f4e2b58863341aa3a15bc47fdcfa40cfa16de0860c3c75d68a28b237185c94c6e7
7
+ data.tar.gz: 0861e6e8539a62174f01ea7bf9cd3d8c6bec9b77b38488465484e08a670d38713bd8c61636b6496818052f85cf9003eb2ec40e11ae553d28d8ae5b2b22bcb69f
data/.gitignore CHANGED
@@ -17,3 +17,5 @@ test/version_tmp
17
17
  tmp
18
18
  *.log
19
19
  *.log.*
20
+ tags
21
+ .rvmrc
@@ -2,6 +2,7 @@ laguage: ruby
2
2
  rvm:
3
3
  - 1.9.3
4
4
  - 2.0.0
5
+ - 2.1.2
5
6
  - ruby-head
6
7
  - jruby-19mode
7
8
  - jruby-head
@@ -9,4 +10,5 @@ rvm:
9
10
  matrix:
10
11
  allow_failures:
11
12
  - rvm: ruby-head
13
+ - rvm: jruby-head
12
14
  - rvm: rbx-19mode
data/CHANGES.md CHANGED
@@ -1,3 +1,7 @@
1
+ # 0.0.5 (Unreleased)
2
+
3
+ * Fix serious bug where we would send messages to the wrong partition [GH-36]. (Thanks @sclasen and @jorgeortiz85 for tracking this down.)
4
+
1
5
  # 0.0.4
2
6
 
3
7
  * Don't truncate UTF8 Messages [GH-18]
data/README.md CHANGED
@@ -13,7 +13,7 @@ Poseidon is a Kafka client. Poseidon only supports the 0.8 API and above.
13
13
 
14
14
  ### Installing a Kafka broker locally
15
15
 
16
- Follow the [instructions](https://cwiki.apache.org/KAFKA/kafka-08-quick-start.html) on the Kafka wiki to build Kafka 0.8 and get a test broker up and running.
16
+ Follow the [instructions](http://kafka.apache.org/documentation.html#quickstart) on the Kafka wiki to build Kafka 0.8 and get a test broker up and running.
17
17
 
18
18
  ### Sending messages to Kafka
19
19
 
@@ -60,15 +60,10 @@ This gem follows [SemVer](http://semver.org). In particular, the public API shou
60
60
 
61
61
  ## Integration Tests
62
62
 
63
- In order to run integration tests you must specify a `KAFKA_PATH` environment variable which points to a built Kafka installation. There are more detailed [instructions](https://cwiki.apache.org/KAFKA/kafka-08-quick-start.html) on the Kafka wiki, but the following should allow you to run integration tests.
63
+ In order to run integration tests you must specify a `KAFKA_PATH` environment variable which points to a built Kafka installation. To build Kafka locally follow the [instructions](http://kafka.apache.org/documentation.html#quickstart) provided by the project.
64
64
 
65
- # cd ~/src/
66
- # git clone https://git-wip-us.apache.org/repos/asf/kafka.git
67
- # git checkout -b 0.8 remotes/origin/0.8
68
- # ./sbt update
69
- # ./sbt package
70
- # ./sbt assembly-package-dependency
71
65
  # cd ~/src/poseidon/
72
- # KAFKA_PATH=~/src/kafka rake spec:integration:simple
66
+ # bundle
67
+ # KAFKA_PATH=~/src/kafka bundle exec rake spec:all # run all unit and integration specs
73
68
 
74
69
  The poseidon test suite will take care of spinning up and down the broker(s) needed for the integration tests.
data/Rakefile CHANGED
@@ -7,11 +7,14 @@ end
7
7
 
8
8
  RSpec::Core::RakeTask.new('spec:integration:simple') do |t|
9
9
  t.pattern = 'spec/integration/simple/*_spec.rb'
10
+ t.rspec_opts = ["--fail-fast", "-f d"]
10
11
  end
11
12
 
12
13
  RSpec::Core::RakeTask.new('spec:integration:multiple_brokers') do |t|
13
14
  t.pattern = 'spec/integration/multiple_brokers/*_spec.rb'
15
+ t.rspec_opts = ["--fail-fast", "-f d"]
14
16
  end
15
17
 
16
18
  task :spec => 'spec:unit'
19
+ task 'spec:all' => ['spec:unit', 'spec:integration:simple', 'spec:integration:multiple_brokers']
17
20
  task :default => 'spec:unit'
@@ -3,30 +3,7 @@ require 'socket'
3
3
  require 'zlib'
4
4
  require 'thread'
5
5
  require 'set'
6
-
7
- # Public API
8
- require "poseidon/message_to_send"
9
- require "poseidon/producer"
10
- require "poseidon/fetched_message"
11
- require "poseidon/partition_consumer"
12
-
13
- # Poseidon!
14
- require "poseidon/message"
15
- require "poseidon/message_set"
16
- require "poseidon/topic_metadata"
17
- require "poseidon/protocol"
18
-
19
- require "poseidon/broker_pool"
20
- require "poseidon/cluster_metadata"
21
- require "poseidon/compression"
22
- require "poseidon/connection"
23
- require "poseidon/message_conductor"
24
- require "poseidon/messages_for_broker"
25
- require "poseidon/messages_to_send"
26
- require "poseidon/messages_to_send_batch"
27
- require "poseidon/producer_compression_config"
28
- require "poseidon/sync_producer"
29
- require "poseidon/version"
6
+ require 'logger'
30
7
 
31
8
  # Top level Poseidon namespace
32
9
  #
@@ -99,4 +76,44 @@ module Poseidon
99
76
  # object that has been #shutdown
100
77
  class ProducerShutdownError < StandardError; end
101
78
  end
79
+
80
+ def self.logger
81
+ @logger ||= null_logger
82
+ end
83
+
84
+ def self.logger=(logger)
85
+ @logger = logger
86
+ end
87
+
88
+ private
89
+ def self.null_logger
90
+ devnull = RUBY_PLATFORM =~ /w32/ ? 'nul' : '/dev/null'
91
+ l = Logger.new(devnull)
92
+ l.level = Logger::INFO
93
+ l
94
+ end
102
95
  end
96
+
97
+ # Public API
98
+ require "poseidon/message_to_send"
99
+ require "poseidon/producer"
100
+ require "poseidon/fetched_message"
101
+ require "poseidon/partition_consumer"
102
+
103
+ # Poseidon!
104
+ require "poseidon/message"
105
+ require "poseidon/message_set"
106
+ require "poseidon/topic_metadata"
107
+ require "poseidon/protocol"
108
+
109
+ require "poseidon/broker_pool"
110
+ require "poseidon/cluster_metadata"
111
+ require "poseidon/compression"
112
+ require "poseidon/connection"
113
+ require "poseidon/message_conductor"
114
+ require "poseidon/messages_for_broker"
115
+ require "poseidon/messages_to_send"
116
+ require "poseidon/messages_to_send_batch"
117
+ require "poseidon/producer_compression_config"
118
+ require "poseidon/sync_producer"
119
+ require "poseidon/version"
@@ -6,16 +6,18 @@ module Poseidon
6
6
  class UnknownBroker < StandardError; end
7
7
 
8
8
  # @param [String] client_id
9
- def initialize(client_id, seed_brokers)
9
+ def initialize(client_id, seed_brokers, socket_timeout_ms)
10
10
  @connections = {}
11
11
  @brokers = {}
12
12
  @client_id = client_id
13
13
  @seed_brokers = seed_brokers
14
+ @socket_timeout_ms = socket_timeout_ms
14
15
  end
15
16
 
16
17
  def fetch_metadata(topics)
17
18
  @seed_brokers.each do |broker|
18
19
  if metadata = fetch_metadata_from_broker(broker, topics)
20
+ Poseidon.logger.debug { "Fetched metadata\n" + metadata.to_s }
19
21
  return metadata
20
22
  end
21
23
  end
@@ -51,10 +53,12 @@ module Poseidon
51
53
  private
52
54
  def fetch_metadata_from_broker(broker, topics)
53
55
  host, port = broker.split(":")
54
- c = Connection.new(host, port, @client_id)
56
+ c = Connection.new(host, port, @client_id, @socket_timeout_ms)
55
57
  c.topic_metadata(topics)
56
58
  rescue Connection::ConnectionFailedError
57
59
  return nil
60
+ ensure
61
+ c && c.close
58
62
  end
59
63
 
60
64
  def connection(broker_id)
@@ -66,7 +70,7 @@ module Poseidon
66
70
  if info.nil?
67
71
  raise UnknownBroker
68
72
  end
69
- Connection.new(info[:host], info[:port], @client_id)
73
+ Connection.new(info[:host], info[:port], @client_id, @socket_timeout_ms)
70
74
  end
71
75
  end
72
76
  end
@@ -57,10 +57,26 @@ module Poseidon
57
57
  end
58
58
  end
59
59
 
60
+ def topics
61
+ @topic_metadata.keys
62
+ end
63
+
64
+ def to_s
65
+ out = ""
66
+ @topic_metadata.each do |topic, metadata|
67
+ out << "Topic: #{topic}"
68
+ out << "-------------------------"
69
+ out << metadata.to_s
70
+ end
71
+ out
72
+ end
73
+
60
74
  private
61
75
  def update_topics(topics)
62
76
  topics.each do |topic|
63
- @topic_metadata[topic.name] = topic
77
+ if topic.exists?
78
+ @topic_metadata[topic.name] = topic
79
+ end
64
80
  end
65
81
  end
66
82
 
@@ -6,6 +6,7 @@ module Poseidon
6
6
  include Protocol
7
7
 
8
8
  class ConnectionFailedError < StandardError; end
9
+ class TimeoutException < Exception; end
9
10
 
10
11
  API_VERSION = 0
11
12
  REPLICA_ID = -1 # Replica id is always -1 for non-brokers
@@ -17,18 +18,19 @@ module Poseidon
17
18
  # @param [String] host Host to connect to
18
19
  # @param [Integer] port Port broker listens on
19
20
  # @param [String] client_id Unique across processes?
20
- def initialize(host, port, client_id)
21
+ def initialize(host, port, client_id, socket_timeout_ms)
21
22
  @host = host
22
23
  @port = port
23
24
 
24
25
  @client_id = client_id
26
+ @socket_timeout_ms = socket_timeout_ms
25
27
  end
26
28
 
27
29
  # Close broker connection
28
30
  def close
29
31
  @socket && @socket.close
30
32
  end
31
-
33
+
32
34
  # Execute a produce call
33
35
  #
34
36
  # @param [Integer] required_acks
@@ -93,32 +95,48 @@ module Poseidon
93
95
  begin
94
96
  @socket = TCPSocket.new(@host, @port)
95
97
  rescue SystemCallError
96
- raise ConnectionFailedError
98
+ raise_connection_failed_error
97
99
  end
98
100
  end
99
101
  end
100
102
 
101
103
  def read_response(response_class)
102
- r = @socket.read(4)
104
+ r = ensure_read_or_timeout(4)
103
105
  if r.nil?
104
- raise ConnectionFailedError
106
+ raise_connection_failed_error
105
107
  end
106
108
  n = r.unpack("N").first
107
- s = @socket.read(n)
109
+ s = ensure_read_or_timeout(n)
108
110
  buffer = Protocol::ResponseBuffer.new(s)
109
111
  response_class.read(buffer)
110
- rescue Errno::ECONNRESET
112
+ rescue Errno::ECONNRESET, SocketError, TimeoutException
111
113
  @socket = nil
112
- raise ConnectionFailedError
114
+ raise_connection_failed_error
115
+ end
116
+
117
+ def ensure_read_or_timeout(maxlen)
118
+ if IO.select([@socket], nil, nil, @socket_timeout_ms / 1000.0)
119
+ @socket.read(maxlen)
120
+ else
121
+ raise TimeoutException.new
122
+ end
113
123
  end
114
124
 
115
125
  def send_request(request)
116
126
  buffer = Protocol::RequestBuffer.new
117
127
  request.write(buffer)
118
- @socket.write([buffer.to_s.bytesize].pack("N") + buffer.to_s)
119
- rescue Errno::EPIPE, Errno::ECONNRESET
128
+ ensure_write_or_timeout([buffer.to_s.bytesize].pack("N") + buffer.to_s)
129
+ rescue Errno::EPIPE, Errno::ECONNRESET, TimeoutException
120
130
  @socket = nil
121
- raise ConnectionFailedError
131
+ raise_connection_failed_error
132
+ end
133
+
134
+ def ensure_write_or_timeout(data)
135
+ if IO.select(nil, [@socket], nil, @socket_timeout_ms / 1000.0)
136
+ @socket.write(data)
137
+ else
138
+ raise TimeoutException.new
139
+ end
122
140
  end
123
141
 
124
142
  def request_common(request_type)
@@ -134,5 +152,9 @@ module Poseidon
134
152
  @correlation_id ||= 0
135
153
  @correlation_id += 1
136
154
  end
155
+
156
+ def raise_connection_failed_error
157
+ raise ConnectionFailedError, "Failed to connect to #{@host}:#{@port}"
158
+ end
137
159
  end
138
160
  end
@@ -31,7 +31,7 @@ module Poseidon
31
31
  topic_metadata = topic_metadatas[topic]
32
32
  if topic_metadata && topic_metadata.leader_available?
33
33
  partition_id = determine_partition(topic_metadata, key)
34
- broker_id = topic_metadata.partitions[partition_id].leader || NO_BROKER
34
+ broker_id = topic_metadata.partition_leader(partition_id) || NO_BROKER
35
35
  else
36
36
  partition_id = NO_PARTITION
37
37
  broker_id = NO_BROKER
@@ -73,7 +73,7 @@ module Poseidon
73
73
  partition_count = topic_metadata.available_partition_count
74
74
 
75
75
  if partition_count > 0
76
- next_partition_counter % partition_count
76
+ topic_metadata.available_partitions[next_partition_counter % partition_count].id
77
77
  else
78
78
  NO_PARTITION
79
79
  end
@@ -35,5 +35,22 @@ module Poseidon
35
35
  Protocol::MessagesForTopic.new(topic, messages_for_partitions)
36
36
  end
37
37
  end
38
+
39
+ # We can always retry these errors because they mean none of the kafka brokers persisted the message
40
+ ALWAYS_RETRYABLE = [Poseidon::Errors::LeaderNotAvailable, Poseidon::Errors::NotLeaderForPartition]
41
+
42
+ def successfully_sent(producer_response)
43
+ failed = []
44
+ producer_response.topic_response.each do |topic_response|
45
+ topic_response.partitions.each do |partition|
46
+ if ALWAYS_RETRYABLE.include?(partition.error_class)
47
+ Poseidon.logger.debug { "Received #{partition.error_class} when attempting to send messages to #{topic_response.topic} on #{partition.partition}" }
48
+ failed.push(*@topics[topic_response.topic][partition.partition])
49
+ end
50
+ end
51
+ end
52
+
53
+ return @messages - failed
54
+ end
38
55
  end
39
56
  end
@@ -30,12 +30,12 @@ module Poseidon
30
30
  MessagesToSendBatch.new(@messages, message_conductor).messages_for_brokers
31
31
  end
32
32
 
33
- def successfully_sent(messages_for_broker)
34
- @messages -= messages_for_broker.messages
33
+ def successfully_sent(messages_sent)
34
+ @messages -= messages_sent
35
35
  end
36
36
 
37
- def all_sent?
38
- !@messages.any?
37
+ def pending_messages?
38
+ @messages.any?
39
39
  end
40
40
 
41
41
  private
@@ -14,18 +14,22 @@ module Poseidon
14
14
 
15
15
  attr_reader :offset
16
16
 
17
+ attr_reader :topic
18
+
17
19
  # Returns a consumer pointing at the lead broker for the partition.
18
20
  #
19
21
  # Eventually this will be replaced by higher level consumer functionality,
20
22
  # this is a stop-gap.
21
23
  #
22
24
  def self.consumer_for_partition(client_id, seed_brokers, topic, partition, offset, options = {})
23
- broker_pool = BrokerPool.new(client_id, seed_brokers)
25
+ broker_pool = BrokerPool.new(client_id, seed_brokers, options[:socket_timeout_ms] || 10_000)
24
26
 
25
27
  cluster_metadata = ClusterMetadata.new
26
28
  cluster_metadata.update(broker_pool.fetch_metadata([topic]))
27
29
 
28
30
  broker = cluster_metadata.lead_broker_for_partition(topic, partition)
31
+ broker_pool.shutdown
32
+
29
33
  new(client_id, broker.host, broker.port, topic, partition, offset, options)
30
34
  end
31
35
 
@@ -38,35 +42,43 @@ module Poseidon
38
42
  # @param [String] topic Topic to read from
39
43
  # @param [Integer] partition Partitions are zero indexed.
40
44
  # @param [Integer,Symbol] offset
41
- # Offset to start reading from.
45
+ # Offset to start reading from. A negative offset can also be passed.
42
46
  # There are a couple special offsets which can be passed as symbols:
43
- # :earliest_offset Start reading from the first offset the server has.
44
- # :latest_offset Start reading from the latest offset the server has.
47
+ # :earliest_offset Start reading from the first offset the server has.
48
+ # :latest_offset Start reading from the latest offset the server has.
45
49
  #
46
50
  # @param [Hash] options
47
51
  # Theses options can all be overridden in each individual fetch command.
48
52
  #
49
53
  # @option options [:max_bytes] Maximum number of bytes to fetch
50
54
  # Default: 1048576 (1MB)
55
+ #
51
56
  # @option options [:max_wait_ms]
52
57
  # How long to block until the server sends us data.
58
+ # NOTE: This is only enforced if min_bytes is > 0.
53
59
  # Default: 100 (100ms)
60
+ #
54
61
  # @option options [:min_bytes] Smallest amount of data the server should send us.
55
- # Default: 0 (Send us data as soon as it is ready)
62
+ # Default: 1 (Send us data as soon as it is ready)
63
+ #
64
+ # @option options [:socket_timeout_ms]
65
+ # How long to wait for reply from server. Should be higher than max_wait_ms.
66
+ # Default: 10000 (10s)
56
67
  #
57
68
  # @api public
58
69
  def initialize(client_id, host, port, topic, partition, offset, options = {})
59
70
  @host = host
60
71
  @port = port
61
72
 
62
- @connection = Connection.new(host, port, client_id)
73
+ handle_options(options)
74
+
75
+ @connection = Connection.new(host, port, client_id, @socket_timeout_ms)
63
76
  @topic = topic
64
77
  @partition = partition
65
78
  if Symbol === offset
66
79
  raise ArgumentError, "Unknown special offset type: #{offset}" unless [:earliest_offset, :latest_offset].include?(offset)
67
80
  end
68
81
  @offset = offset
69
- handle_options(options)
70
82
  end
71
83
 
72
84
  # Fetch messages from the broker.
@@ -75,14 +87,16 @@ module Poseidon
75
87
  #
76
88
  # @option options [:max_bytes]
77
89
  # Maximum number of bytes to fetch
90
+ #
78
91
  # @option options [:max_wait_ms]
79
92
  # How long to block until the server sends us data.
93
+ #
80
94
  # @option options [:min_bytes]
81
95
  # Smallest amount of data the server should send us.
82
96
  #
83
97
  # @api public
84
98
  def fetch(options = {})
85
- fetch_max_wait = options.delete(:max_wait) || max_wait_ms
99
+ fetch_max_wait = options.delete(:max_wait_ms) || max_wait_ms
86
100
  fetch_max_bytes = options.delete(:max_bytes) || max_bytes
87
101
  fetch_min_bytes = options.delete(:min_bytes) || min_bytes
88
102
 
@@ -94,7 +108,14 @@ module Poseidon
94
108
  fetch_response = @connection.fetch(fetch_max_wait, fetch_min_bytes, topic_fetches)
95
109
  topic_response = fetch_response.topic_fetch_responses.first
96
110
  partition_response = topic_response.partition_fetch_responses.first
97
- if partition_response.error != Errors::NO_ERROR_CODE
111
+
112
+ unless partition_response.error == Errors::NO_ERROR_CODE
113
+ if @offset < 0 &&
114
+ Errors::ERROR_CODES[partition_response.error] == Errors::OffsetOutOfRange
115
+ @offset = :earliest_offset
116
+ return fetch(options)
117
+ end
118
+
98
119
  raise Errors::ERROR_CODES[partition_response.error]
99
120
  else
100
121
  @highwater_mark = partition_response.highwater_mark_offset
@@ -116,11 +137,27 @@ module Poseidon
116
137
  @offset
117
138
  end
118
139
 
140
+ # Close the connection to the kafka broker
141
+ #
142
+ # @return [Nil]
143
+ #
144
+ # @api public
145
+ def close
146
+ @connection.close
147
+ nil
148
+ end
149
+
119
150
  private
120
151
  def handle_options(options)
121
- @max_bytes = options.delete(:max_bytes) || 1024*1024
122
- @min_bytes = options.delete(:min_bytes) || 0
123
- @max_wait_ms = options.delete(:max_wait_ms) || 10_000
152
+ @max_bytes = options.delete(:max_bytes) || 1024*1024
153
+ @min_bytes = options.delete(:min_bytes) || 1
154
+ @max_wait_ms = options.delete(:max_wait_ms) || 10_000
155
+ @socket_timeout_ms = options.delete(:socket_timeout_ms) || @max_wait_ms + 10_000
156
+
157
+ if @socket_timeout_ms < @max_wait_ms
158
+ raise ArgumentError, "Setting socket_timeout_ms should be higher than max_wait_ms"
159
+ end
160
+
124
161
  if options.keys.any?
125
162
  raise ArgumentError, "Unknown options: #{options.keys.inspect}"
126
163
  end
@@ -141,40 +178,46 @@ module Poseidon
141
178
  def resolve_offset_if_necessary
142
179
  return unless Symbol === @offset || @offset < 0
143
180
 
144
- if @offset == :earliest_offset
145
- @offset = -2
146
- elsif @offset == :latest_offset
147
- @offset = -1
181
+ protocol_offset = case @offset
182
+ when :earliest_offset
183
+ -2
184
+ when :latest_offset
185
+ -1
186
+ else
187
+ -1
148
188
  end
149
189
 
150
- topic_offset_responses = @connection.offset(build_topic_offset_request)
190
+ topic_offset_responses = @connection.offset(build_topic_offset_request(protocol_offset))
151
191
  partition_offsets = topic_offset_responses.first.partition_offsets
152
192
  if partition_offsets.first.error != Errors::NO_ERROR_CODE
153
193
  raise Errors::ERROR_CODES[partition_offsets.first.error]
154
194
  end
155
195
 
156
196
  offset_struct = partition_offsets.first.offsets.first
157
- if offset_struct.nil?
158
- @offset = 0
197
+
198
+ @offset = if offset_struct.nil?
199
+ 0
200
+ elsif @offset.kind_of?(Fixnum) && @offset < 0
201
+ offset_struct.offset + @offset
159
202
  else
160
- @offset = offset_struct.offset
203
+ offset_struct.offset
161
204
  end
162
205
  end
163
206
 
164
- def build_topic_offset_request
207
+ def build_topic_offset_request(protocol_offset)
165
208
  partition_offset_request = Protocol::PartitionOffsetRequest.new(
166
209
  @partition,
167
- @offset,
210
+ protocol_offset,
168
211
  max_number_of_offsets = 1)
169
212
 
170
- [Protocol::TopicOffsetRequest.new(@topic, [partition_offset_request])]
213
+ [Protocol::TopicOffsetRequest.new(topic, [partition_offset_request])]
171
214
  end
172
215
 
173
216
  def build_topic_fetch_request(max_bytes)
174
217
  partition_fetches = [Protocol::PartitionFetch.new(@partition,
175
218
  next_offset,
176
219
  max_bytes)]
177
- topic_fetches = [Protocol::TopicFetch.new(@topic, partition_fetches)]
220
+ topic_fetches = [Protocol::TopicFetch.new(topic, partition_fetches)]
178
221
  end
179
222
  end
180
223
  end