racecar 0.4.1 → 0.4.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 03f0ef60cb9d16c40adc14765b5fe497b1a1ee5384e7c63d4941373a618c5b52
4
- data.tar.gz: 8551c7ec9b838ca3df774afb9f5e886957b3ac7ae3d88f63fc64b084aee03e9a
3
+ metadata.gz: c45d8677d5b6355960cdef16cddc745909b28145bb3243cfa64170f6f284609a
4
+ data.tar.gz: dfed28e1acb40d781aedc6c0ae1d6d916b04efa48919cad6a05c4310720fee0b
5
5
  SHA512:
6
- metadata.gz: afc4e4df8135bb55c109213ba09bd05c258ceccfb7674530a8ca9a670cee6bbb962bd3994d73682257a2ea530d7a8727b2b20c4a33e2ac5dbe229783768302a6
7
- data.tar.gz: 3228f755bab3981835922c4dd47f3d25c65b258ff1fd612b9d752f7360bcb2f0613b9d3d455aeed66f45f39d7ce5ca2fd4732e8e116c4e61b26a9a25f2ab0b76
6
+ metadata.gz: 5a8f1c5077c03fb3dcca7124d6465ba6386f5a9a0dbe7ef5bf752073c58f04cc36c2665c1b5595b692289dfacddbb83e5cc022f07fbf05fc83dfcca266e743fb
7
+ data.tar.gz: 75dc3b0d158f0a4b8fb8a8242dc7f4b2c043217792f6351a27d2a8698361e7e86fe04315a4b167202eb1703045b58183533019ef9dd48563db954c076ddb4b01
@@ -2,6 +2,10 @@
2
2
 
3
3
  ## Unreleased
4
4
 
5
+ ## racecar v0.4.2
6
+
7
+ * Allow configuring `max_bytes` and `max_fetch_queue_size`.
8
+
5
9
  ## racecar v0.4.1
6
10
 
7
11
  * Allow configuring the producer (#77).
data/README.md CHANGED
@@ -246,6 +246,17 @@ All timeouts are defined in number of seconds.
246
246
  * `socket_timeout` – How long to wait when trying to communicate with a Kafka broker. Default is 30 seconds.
247
247
  * `max_wait_time` – How long to allow the Kafka brokers to wait before returning messages. A higher number means larger batches, at the cost of higher latency. Default is 1 second.
248
248
 
249
+ #### Memory & network usage
250
+
251
+ Kafka is _really_ good at throwing data at consumers, so you may want to tune these variables in order to avoid ballooning your process' memory or saturating your network capacity.
252
+
253
+ Racecar uses ruby-kafka under the hood, which fetches messages from the Kafka brokers in a background thread. This thread pushes fetch responses, possibly containing messages from many partitions, into a queue that is read by the processing thread (AKA your code). The main way to control the fetcher thread is to control the size of those responses and the size of the queue.
254
+
255
+ * `max_bytes` — The maximum size of message sets returned from a single fetch request.
256
+ * `max_fetch_queue_size` — The maximum number of fetch responses to keep in the queue.
257
+
258
+ The memory usage limit is roughly estimated as `max_bytes * max_fetch_queue_size`, plus whatever your application uses.
259
+
249
260
  #### SSL encryption, authentication & authorization
250
261
 
251
262
  * `ssl_ca_cert` – A valid SSL certificate authority, as a string.
@@ -274,6 +285,12 @@ If using SCRAM:
274
285
  * `sasl_scram_password` – The password used to authenticate.
275
286
  * `sasl_scram_mechanism` – The SCRAM mechanism to use, either `sha256` or `sha512`.
276
287
 
288
+ #### Producing messages
289
+
290
+ These settings are related to consumers that _produce messages to Kafka_.
291
+
292
+ * `producer_compression_codec` – If defined, Racecar will compress messages before writing them to Kafka. The codec needs to be one of `gzip`, `lz4`, or `snappy`, either as a Symbol or a String.
293
+
277
294
  #### Datadog monitoring
278
295
 
279
296
  Racecar supports configuring ruby-kafka's [Datadog](https://www.datadoghq.com/) monitoring integration. If you're running a normal Datadog agent on your host, you just need to set `datadog_enabled` to `true`, as the rest of the settings come with sane defaults.
@@ -64,6 +64,8 @@ module Racecar
64
64
  rescue => e
65
65
  $stderr.puts "=> Crashed: #{e.class}: #{e}\n#{e.backtrace.join("\n")}"
66
66
 
67
+ config.error_handler.call(e)
68
+
67
69
  raise
68
70
  end
69
71
 
@@ -22,6 +22,9 @@ module Racecar
22
22
  desc "How long committed offsets will be retained."
23
23
  integer :offset_retention_time
24
24
 
25
+ desc "The maximum number of fetch responses to keep queued before processing"
26
+ integer :max_fetch_queue_size, default: 10
27
+
25
28
  desc "How long to pause a partition for if the consumer raises an exception while processing a message -- set to -1 to pause indefinitely"
26
29
  float :pause_timeout, default: 10
27
30
 
@@ -43,6 +46,9 @@ module Racecar
43
46
  desc "How long to allow the Kafka brokers to wait before returning messages"
44
47
  float :max_wait_time, default: 1
45
48
 
49
+ desc "The maximum size of message sets returned from a single fetch"
50
+ integer :max_bytes, default: 10485760
51
+
46
52
  desc "A prefix used when generating consumer group names"
47
53
  string :group_id_prefix
48
54
 
@@ -67,6 +73,9 @@ module Racecar
67
73
  desc "A valid SSL client certificate key"
68
74
  string :ssl_client_cert_key
69
75
 
76
+ desc "Support for using the CA certs installed on your system by default for SSL. More info, see: https://github.com/zendesk/ruby-kafka/pull/521"
77
+ boolean :ssl_ca_certs_from_system, default: false
78
+
70
79
  desc "The GSSAPI principal"
71
80
  string :sasl_gssapi_principal
72
81
 
@@ -146,6 +155,10 @@ module Racecar
146
155
  if connect_timeout <= max_wait_time
147
156
  raise ConfigError, "`connect_timeout` must be longer than `max_wait_time`"
148
157
  end
158
+
159
+ if max_pause_timeout && !pause_with_exponential_backoff?
160
+ raise ConfigError, "`max_pause_timeout` only makes sense when `pause_with_exponential_backoff` is enabled"
161
+ end
149
162
  end
150
163
 
151
164
  def load_consumer_class(consumer_class)
@@ -13,10 +13,14 @@ module Racecar
13
13
 
14
14
  # Adds one or more topic subscriptions.
15
15
  #
16
- # start_from_beginning - whether to start from the beginning or the end of each
17
- # partition.
18
- # max_bytes_per_partition - the maximum number of bytes to fetch from each partition
19
- # at a time.
16
+ # Can be called multiple times in order to subscribe to more topics.
17
+ #
18
+ # @param topics [String] one or more topics to subscribe to.
19
+ # @param start_from_beginning [Boolean] whether to start from the beginning or the end
20
+ # of each partition.
21
+ # @param max_bytes_per_partition [Integer] the maximum number of bytes to fetch from
22
+ # each partition at a time.
23
+ # @return [nil]
20
24
  def subscribes_to(*topics, start_from_beginning: true, max_bytes_per_partition: 1048576)
21
25
  topics.each do |topic|
22
26
  subscriptions << Subscription.new(topic, start_from_beginning, max_bytes_per_partition)
@@ -30,6 +30,7 @@ module Racecar
30
30
  sasl_scram_username: config.sasl_scram_username,
31
31
  sasl_scram_password: config.sasl_scram_password,
32
32
  sasl_scram_mechanism: config.sasl_scram_mechanism,
33
+ ssl_ca_certs_from_system: config.ssl_ca_certs_from_system,
33
34
  )
34
35
 
35
36
  @consumer = kafka.consumer(
@@ -39,6 +40,7 @@ module Racecar
39
40
  session_timeout: config.session_timeout,
40
41
  heartbeat_interval: config.heartbeat_interval,
41
42
  offset_retention_time: config.offset_retention_time,
43
+ fetcher_max_queue_size: config.max_fetch_queue_size,
42
44
  )
43
45
 
44
46
  # Stop the consumer on SIGINT, SIGQUIT or SIGTERM.
@@ -66,7 +68,7 @@ module Racecar
66
68
 
67
69
  begin
68
70
  if processor.respond_to?(:process)
69
- consumer.each_message(max_wait_time: config.max_wait_time) do |message|
71
+ consumer.each_message(max_wait_time: config.max_wait_time, max_bytes: config.max_bytes) do |message|
70
72
  payload = {
71
73
  consumer_class: processor.class.to_s,
72
74
  topic: message.topic,
@@ -80,7 +82,7 @@ module Racecar
80
82
  end
81
83
  end
82
84
  elsif processor.respond_to?(:process_batch)
83
- consumer.each_batch(max_wait_time: config.max_wait_time) do |batch|
85
+ consumer.each_batch(max_wait_time: config.max_wait_time, max_bytes: config.max_bytes) do |batch|
84
86
  payload = {
85
87
  consumer_class: processor.class.to_s,
86
88
  topic: batch.topic,
@@ -1,3 +1,3 @@
1
1
  module Racecar
2
- VERSION = "0.4.1"
2
+ VERSION = "0.4.2"
3
3
  end
@@ -20,7 +20,7 @@ Gem::Specification.new do |spec|
20
20
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
21
21
  spec.require_paths = ["lib"]
22
22
 
23
- spec.add_runtime_dependency "king_konf", "~> 0.3.0"
23
+ spec.add_runtime_dependency "king_konf", "~> 0.3.6"
24
24
  spec.add_runtime_dependency "ruby-kafka", "~> 0.6"
25
25
 
26
26
  spec.add_development_dependency "bundler", "~> 1.13"
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: racecar
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.4.1
4
+ version: 0.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Daniel Schierbeck
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: exe
11
11
  cert_chain: []
12
- date: 2018-05-17 00:00:00.000000000 Z
12
+ date: 2018-06-07 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: king_konf
@@ -17,14 +17,14 @@ dependencies:
17
17
  requirements:
18
18
  - - "~>"
19
19
  - !ruby/object:Gem::Version
20
- version: 0.3.0
20
+ version: 0.3.6
21
21
  type: :runtime
22
22
  prerelease: false
23
23
  version_requirements: !ruby/object:Gem::Requirement
24
24
  requirements:
25
25
  - - "~>"
26
26
  - !ruby/object:Gem::Version
27
- version: 0.3.0
27
+ version: 0.3.6
28
28
  - !ruby/object:Gem::Dependency
29
29
  name: ruby-kafka
30
30
  requirement: !ruby/object:Gem::Requirement