racecar 2.0.0.beta5 → 2.2.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: f37b63d59011da69d49663e81d80b7681ab0ec703ca1641d95d8869d767ab60b
4
- data.tar.gz: be70ea61f2571a4f0e5a44be4984b3126374530abb8cb0a376809d3566c73d3e
3
+ metadata.gz: e2c32338556d27bfcbae35df0758163a457d2c6e3f520141206c5a9c0124cc68
4
+ data.tar.gz: 43d9c42d0d483c269b15d417ef59e2985da3ca3945d34d6772f8dd262ccbfaf6
5
5
  SHA512:
6
- metadata.gz: b3224f3e237adff5f5d069a651708ee68c0e1ef2a2f61f95cae1eb864c0685d8bd20d9648870f147b1384d71025658852b504ebfafeeedcd9ef31dd56e23eac0
7
- data.tar.gz: 1e1ddf3901371f8863798c209dda31da2901e78d4b36d072ee4f4fd9569d0538ccbac66f3bbc033da69e38d4419e1c849ef8133ad64463931bf601e5530ec824
6
+ metadata.gz: 0467ac1cdefb6cad9870dd73b92f4a5a943b9f685ff3fc876b3f183d109ae3d29d7c2c7dffea8f31bca7c7b18565e5aba04d4865c94f2448a7228be175855a5b
7
+ data.tar.gz: e4ab43eb180995af916d447b006438b4a48cb808b29aabec52b455e246541a083192d1b560a957fa6f3ab7d5412dd12ab74aac49acc0b606c3df87cec90b93b6
@@ -0,0 +1,56 @@
1
+ version: 2.1
2
+ orbs:
3
+ ruby: circleci/ruby@0.1.2
4
+
5
+ jobs:
6
+ build:
7
+ docker:
8
+ - image: circleci/ruby:2.6.3-stretch-node
9
+ executor: ruby/default
10
+ steps:
11
+ - checkout
12
+ - run:
13
+ name: Which bundler?
14
+ command: bundle -v
15
+ - ruby/bundle-install
16
+ - run: bundle exec rspec --exclude-pattern='spec/integration/*_spec.rb'
17
+ integration-tests:
18
+ docker:
19
+ - image: circleci/ruby:2.6.3-stretch-node
20
+ - image: wurstmeister/zookeeper
21
+ - image: wurstmeister/kafka:2.11-2.0.0
22
+ environment:
23
+ KAFKA_ADVERTISED_HOST_NAME: localhost
24
+ KAFKA_ADVERTISED_PORT: 9092
25
+ KAFKA_PORT: 9092
26
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
27
+ KAFKA_DELETE_TOPIC_ENABLE: true
28
+ - image: wurstmeister/kafka:2.11-2.0.0
29
+ environment:
30
+ KAFKA_ADVERTISED_HOST_NAME: localhost
31
+ KAFKA_ADVERTISED_PORT: 9093
32
+ KAFKA_PORT: 9093
33
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
34
+ KAFKA_DELETE_TOPIC_ENABLE: true
35
+ - image: wurstmeister/kafka:2.11-2.0.0
36
+ environment:
37
+ KAFKA_ADVERTISED_HOST_NAME: localhost
38
+ KAFKA_ADVERTISED_PORT: 9094
39
+ KAFKA_PORT: 9094
40
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
41
+ KAFKA_DELETE_TOPIC_ENABLE: true
42
+ executor: ruby/default
43
+ steps:
44
+ - checkout
45
+ - run:
46
+ name: Which bundler?
47
+ command: bundle -v
48
+ - ruby/bundle-install
49
+ - run: bundle exec rspec --pattern='spec/integration/*_spec.rb'
50
+
51
+ workflows:
52
+ version: 2
53
+ test:
54
+ jobs:
55
+ - build
56
+ - integration-tests
@@ -0,0 +1,61 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - '**'
7
+
8
+ jobs:
9
+ unit-specs:
10
+ runs-on: ubuntu-latest
11
+
12
+ strategy:
13
+ matrix:
14
+ ruby-version: ["2.5", "2.6"]
15
+
16
+ steps:
17
+ - uses: zendesk/checkout@v2
18
+ - name: Set up Ruby
19
+ uses: zendesk/setup-ruby@v1.58.0
20
+ with:
21
+ ruby-version: ${{ matrix.ruby-version }}
22
+ bundler-cache: true
23
+ - name: Build and test with RSpec
24
+ run: bundle exec rspec --format documentation --require spec_helper --color --exclude-pattern='spec/integration/*_spec.rb'
25
+
26
+ integration-specs:
27
+ runs-on: ubuntu-latest
28
+
29
+ services:
30
+ zookeeper:
31
+ image: confluentinc/cp-zookeeper
32
+ ports:
33
+ - 2181:2181
34
+ env:
35
+ ZOOKEEPER_CLIENT_PORT: 2181
36
+
37
+ kafka:
38
+ image: confluentinc/cp-kafka
39
+ ports:
40
+ - 9092:9092
41
+ - 29092:29092
42
+ options: --health-cmd "kafka-topics --list --bootstrap-server=localhost:9092" --health-interval 10s --health-timeout 5s --health-retries 5
43
+ env:
44
+ KAFKA_BROKER_ID: 1
45
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
46
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
47
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
48
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
49
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
50
+
51
+ steps:
52
+ - uses: zendesk/checkout@v2
53
+ - name: Set up Ruby
54
+ uses: zendesk/setup-ruby@v1.58.0
55
+ with:
56
+ ruby-version: 2.7
57
+ bundler-cache: true
58
+ - name: Build and test with RSpec
59
+ env:
60
+ RACECAR_BROKERS: localhost:9092
61
+ run: bundle exec rspec --format documentation --require spec_helper --color spec/integration/*_spec.rb
data/.gitignore CHANGED
@@ -1,10 +1,9 @@
1
1
  /.bundle/
2
2
  /.yardoc
3
- /Gemfile.lock
4
3
  /_yardoc/
5
4
  /coverage/
6
5
  /doc/
7
6
  /pkg/
8
7
  /spec/reports/
9
8
  /tmp/
10
- /vendor/bundle/
9
+ /vendor/bundle/
@@ -1,30 +1,48 @@
1
1
  # Changelog
2
2
 
3
+ ## Unreleased
4
+
5
+ ## racecar v2.2.0
6
+
7
+ * [Racecar::ConsumerSet] **breaking change** `Racecar::ConsumerSet`'s functions `poll` and `batch_poll` expect the max wait values to be given in milliseconds. The defaults were using `config.max_wait_time`, which is in seconds. If you do not directly use `Racecar::ConsumerSet`, or always call its `poll` and `batch_poll` functions by specifying the max wait time (the first argument), then this breaking change does not affect you. ([#214](https://github.com/zendesk/racecar/pull/214))
8
+
9
+ ## racecar v2.1.1
10
+
11
+ * [Bugfix] Close RdKafka consumer in ConsumerSet#reset_current_consumer to prevent memory leak (#196)
12
+ * [Bugfix] `poll`/`batch_poll` would not retry in edge cases and raise immediately. They still honor the `max_wait_time` setting, but might return no messages instead and only retry on their next call. ([#177](https://github.com/zendesk/racecar/pull/177))
13
+
14
+ ## racecar v2.1.0
15
+
16
+ * Bump rdkafka to 0.8.0 (#191)
17
+
3
18
  ## racecar v2.0.0
4
19
 
5
- * Replace `ruby-kafka` with `rdkafka-ruby`
6
- * Removed config option `sasl_over_ssl`
7
- * [Racecar::Consumer] Do not pause consuming partitions on exception
8
- * [Racecar::Consumer] `topic`, `payload` and `key` are mandadory to method `produce`
9
- * [Racecar::Consumer] `process_batch` retrieves an array of messages instead of batch object
10
- * [Racecar::Consumer] Remove `offset_retention_time`
11
- * [Racecar::Consumer] Allow providing `additional_config` for subscriptions
12
- * [Racecar::Consumer] Provide access to `producer` and `consumer`
13
- * [Racecar::Consumer] Enforce delivering messages with method `deliver!`
14
- * [Racecar::Consumer] instead of raising when a partition EOF is reached, the result can be queried through `consumer.last_poll_read_partition_eof?`
15
- * [Racecar::Config] Remove `offset_retention_time`, `connect_timeout` and `offset_commit_threshold`
16
- * [Racecar::Config] Pass config to `rdkafka-ruby` via `producer` and `consumer`
17
- * [Racecar::Config] Replace `max_fetch_queue_size` with `min_message_queue_size`
18
- * [Racecar::Config] Add `synchronous_commits` to control blocking of `consumer.commit` (default `false`)
19
- * [Racecar::Config] Add `security_protocol` to control protocol between client and broker
20
- * [Racecar::Config] SSL configuration via `ssl_ca_location`, `ssl_crl_location`, `ssl_keystore_location` and `ssl_keystore_password`
21
- * [Racecar::Config] SASL configuration via `sasl_mechanism`, `sasl_kerberos_service_name`, `sasl_kerberos_principal`, `sasl_kerberos_kinit_cmd`, `sasl_kerberos_keytab`, `sasl_kerberos_min_time_before_relogin`, `sasl_username` and `sasl_password`
20
+ * Replace `ruby-kafka` with `rdkafka-ruby` as the low-level library underneath Racecar (#91).
21
+ * Fix `max_wait_time` usage (#179).
22
+ * Removed config option `sasl_over_ssl`.
23
+ * [Racecar::Consumer] Do not pause consuming partitions on exception.
24
+ * [Racecar::Consumer] `topic`, `payload` and `key` are mandatory to method `produce`.
25
+ * [Racecar::Consumer] `process_batch` retrieves an array of messages instead of batch object.
26
+ * [Racecar::Consumer] Remove `offset_retention_time`.
27
+ * [Racecar::Consumer] Allow providing `additional_config` for subscriptions.
28
+ * [Racecar::Consumer] Provide access to `producer` and `consumer`.
29
+ * [Racecar::Consumer] Enforce delivering messages with method `deliver!`.
30
+ * [Racecar::Consumer] instead of raising when a partition EOF is reached, the result can be queried through `consumer.last_poll_read_partition_eof?`.
31
+ * [Racecar::Config] Remove `offset_retention_time`, `connect_timeout` and `offset_commit_threshold`.
32
+ * [Racecar::Config] Pass config to `rdkafka-ruby` via `producer` and `consumer`.
33
+ * [Racecar::Config] Replace `max_fetch_queue_size` with `min_message_queue_size`.
34
+ * [Racecar::Config] Add `synchronous_commits` to control blocking of `consumer.commit` (default `false`).
35
+ * [Racecar::Config] Add `security_protocol` to control protocol between client and broker.
36
+ * [Racecar::Config] SSL configuration via `ssl_ca_location`, `ssl_crl_location`, `ssl_keystore_location` and `ssl_keystore_password`.
37
+ * [Racecar::Config] SASL configuration via `sasl_mechanism`, `sasl_kerberos_service_name`, `sasl_kerberos_principal`, `sasl_kerberos_kinit_cmd`, `sasl_kerberos_keytab`, `sasl_kerberos_min_time_before_relogin`, `sasl_username` and `sasl_password`.
22
38
  * [Instrumentation] `produce_message.racecar` sent whenever a produced message is queued. Payload includes `topic`, `key`, `value` and `create_time`.
23
39
  * [Instrumentation] `acknowledged_message.racecar` send whenever a produced message was successfully received by Kafka. Payload includes `offset` and `partition`, but no message details.
24
- * [Instrumentation] `rdkafka-ruby` does not yet provide instrumentation [rdkafka-ruby#54](https://github.com/appsignal/rdkafka-ruby/issues/54)
25
- * [Instrumentation] if processors define a `statistics_callback`, it will be called once every second for every subscription or producer connection. The first argument will be a Hash, for contents see [librdkafka STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md)
40
+ * [Instrumentation] `rdkafka-ruby` does not yet provide instrumentation [rdkafka-ruby#54](https://github.com/appsignal/rdkafka-ruby/issues/54).
41
+ * [Instrumentation] if processors define a `statistics_callback`, it will be called once every second for every subscription or producer connection. The first argument will be a Hash, for contents see [librdkafka STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md).
26
42
  * Add current directory to `$LOAD_PATH` only when `--require` option is used (#117).
27
- * Remove manual heartbeat support, see [Long-running message processing section in README](README.md#long-running-message-processing)
43
+ * Remove manual heartbeat support, see [Long-running message processing section in README](README.md#long-running-message-processing).
44
+ * Rescue exceptions--then log and pass to `on_error`--at the outermost level of `exe/racecar`, so that exceptions raised outside `Cli.run` are not silently discarded (#186).
45
+ * When exceptions with a `cause` are logged, recursively log the `cause` detail, separated by `--- Caused by: ---\n`.
28
46
 
29
47
  ## racecar v1.0.0
30
48
 
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  source 'https://rubygems.org'
2
4
 
3
5
  # Specify your gem's dependencies in racecar.gemspec
@@ -0,0 +1,69 @@
1
+ PATH
2
+ remote: .
3
+ specs:
4
+ racecar (2.1.1)
5
+ king_konf (~> 1.0.0)
6
+ rdkafka (~> 0.8.0)
7
+
8
+ GEM
9
+ remote: https://rubygems.org/
10
+ specs:
11
+ activesupport (6.0.3.4)
12
+ concurrent-ruby (~> 1.0, >= 1.0.2)
13
+ i18n (>= 0.7, < 2)
14
+ minitest (~> 5.1)
15
+ tzinfo (~> 1.1)
16
+ zeitwerk (~> 2.2, >= 2.2.2)
17
+ coderay (1.1.3)
18
+ concurrent-ruby (1.1.7)
19
+ diff-lcs (1.4.4)
20
+ dogstatsd-ruby (4.8.2)
21
+ ffi (1.13.1)
22
+ i18n (1.8.5)
23
+ concurrent-ruby (~> 1.0)
24
+ king_konf (1.0.0)
25
+ method_source (1.0.0)
26
+ mini_portile2 (2.5.0)
27
+ minitest (5.14.2)
28
+ pry (0.13.1)
29
+ coderay (~> 1.1)
30
+ method_source (~> 1.0)
31
+ rake (13.0.1)
32
+ rdkafka (0.8.1)
33
+ ffi (~> 1.9)
34
+ mini_portile2 (~> 2.1)
35
+ rake (>= 12.3)
36
+ rspec (3.10.0)
37
+ rspec-core (~> 3.10.0)
38
+ rspec-expectations (~> 3.10.0)
39
+ rspec-mocks (~> 3.10.0)
40
+ rspec-core (3.10.0)
41
+ rspec-support (~> 3.10.0)
42
+ rspec-expectations (3.10.0)
43
+ diff-lcs (>= 1.2.0, < 2.0)
44
+ rspec-support (~> 3.10.0)
45
+ rspec-mocks (3.10.0)
46
+ diff-lcs (>= 1.2.0, < 2.0)
47
+ rspec-support (~> 3.10.0)
48
+ rspec-support (3.10.0)
49
+ thread_safe (0.3.6)
50
+ timecop (0.9.2)
51
+ tzinfo (1.2.8)
52
+ thread_safe (~> 0.1)
53
+ zeitwerk (2.4.2)
54
+
55
+ PLATFORMS
56
+ ruby
57
+
58
+ DEPENDENCIES
59
+ activesupport (>= 4.0, < 6.1)
60
+ bundler (>= 1.13, < 3)
61
+ dogstatsd-ruby (>= 4.0.0, < 5.0.0)
62
+ pry
63
+ racecar!
64
+ rake (> 10.0)
65
+ rspec (~> 3.0)
66
+ timecop
67
+
68
+ BUNDLED WITH
69
+ 2.1.4
data/README.md CHANGED
@@ -1,5 +1,3 @@
1
- **IMPORTANT:** The `master` branch is unstable, working towards a v2 release that breaks a lot of stuff. Use the `v1-stable` branch if you want to suggest changes.
2
-
3
1
  # Racecar
4
2
 
5
3
  Racecar is a friendly and easy-to-approach Kafka consumer framework. It allows you to write small applications that process messages stored in Kafka topics while optionally integrating with your Rails models.
@@ -21,6 +19,7 @@ The framework is based on [rdkafka-ruby](https://github.com/appsignal/rdkafka-ru
21
19
  7. [Handling errors](#handling-errors)
22
20
  8. [Logging](#logging)
23
21
  9. [Operations](#operations)
22
+ 10. [Upgrading from v1 to v2](#upgrading-from-v1-to-v2)
24
23
  3. [Development](#development)
25
24
  4. [Contributing](#contributing)
26
25
  5. [Support and Discussion](#support-and-discussion)
@@ -51,9 +50,7 @@ This will add a config file in `config/racecar.yml`.
51
50
 
52
51
  ## Usage
53
52
 
54
- Racecar is built for simplicity of development and operation. If you need more flexibility, it's quite straightforward to build your own Kafka consumer executables using [ruby-kafka](https://github.com/zendesk/ruby-kafka#consuming-messages-from-kafka) directly.
55
-
56
- First, a short introduction to the Kafka consumer concept as well as some basic background on Kafka.
53
+ Racecar is built for simplicity of development and operation. First, a short introduction to the Kafka consumer concept as well as some basic background on Kafka.
57
54
 
58
55
  Kafka stores messages in so-called _partitions_ which are grouped into _topics_. Within a partition, each message gets a unique offset.
59
56
 
@@ -227,7 +224,7 @@ You can set message headers by passing a `headers:` option with a Hash of header
227
224
 
228
225
  Racecar provides a flexible way to configure your consumer in a way that feels at home in a Rails application. If you haven't already, run `bundle exec rails generate racecar:install` in order to generate a config file. You'll get a separate section for each Rails environment, with the common configuration values in a shared `common` section.
229
226
 
230
- **Note:** many of these configuration keys correspond directly to similarly named concepts in [ruby-kafka](https://github.com/zendesk/ruby-kafka); for more details on low-level operations, read that project's documentation.
227
+ **Note:** many of these configuration keys correspond directly to similarly named concepts in [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby); for more details on low-level operations, read that project's documentation.
231
228
 
232
229
  It's also possible to configure Racecar using environment variables. For any given configuration key, there should be a corresponding environment variable with the prefix `RACECAR_`, in upper case. For instance, in order to configure the client id, set `RACECAR_CLIENT_ID=some-id` in the process in which the Racecar consumer is launched. You can set `brokers` by passing a comma-separated list, e.g. `RACECAR_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092`.
233
230
 
@@ -274,7 +271,7 @@ All timeouts are defined in number of seconds.
274
271
 
275
272
  Kafka is _really_ good at throwing data at consumers, so you may want to tune these variables in order to avoid ballooning your process' memory or saturating your network capacity.
276
273
 
277
- Racecar uses ruby-kafka under the hood, which fetches messages from the Kafka brokers in a background thread. This thread pushes fetch responses, possible containing messages from many partitions, into a queue that is read by the processing thread (AKA your code). The main way to control the fetcher thread is to control the size of those responses and the size of the queue.
274
+ Racecar uses [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby) under the hood, which fetches messages from the Kafka brokers in a background thread. This thread pushes fetch responses, possibly containing messages from many partitions, into a queue that is read by the processing thread (AKA your code). The main way to control the fetcher thread is to control the size of those responses and the size of the queue.
278
275
 
279
276
  * `max_bytes` — Maximum amount of data the broker shall return for a Fetch request.
280
277
  * `min_message_queue_size` — The minimum number of messages in the local consumer queue.
@@ -314,7 +311,7 @@ These settings are related to consumers that _produce messages to Kafka_.
314
311
 
315
312
  #### Datadog monitoring
316
313
 
317
- Racecar supports configuring ruby-kafka's [Datadog](https://www.datadoghq.com/) monitoring integration. If you're running a normal Datadog agent on your host, you just need to set `datadog_enabled` to `true`, as the rest of the settings come with sane defaults.
314
+ Racecar supports [Datadog](https://www.datadoghq.com/) monitoring integration. If you're running a normal Datadog agent on your host, you just need to set `datadog_enabled` to `true`, as the rest of the settings come with sane defaults.
318
315
 
319
316
  * `datadog_enabled` – Whether Datadog monitoring is enabled (defaults to `false`).
320
317
  * `datadog_host` – The host running the Datadog agent.
@@ -484,10 +481,17 @@ In order to gracefully shut down a Racecar consumer process, send it the `SIGTER
484
481
  In order to introspect the configuration of a consumer process, send it the `SIGUSR1` signal. This will make Racecar print its configuration to the standard error file descriptor associated with the consumer process, so you'll need to know where that is written to.
485
482
 
486
483
 
484
+ ### Upgrading from v1 to v2
485
+
486
+ In order to safely upgrade from Racecar v1 to v2, you need to completely shut down your consumer group before starting it up again with the v2 Racecar dependency. In general, you should avoid rolling deploys for consumer groups, so it is likely the case that this will just work for you, but it's a good idea to check first.
487
+
488
+
487
489
  ## Development
488
490
 
489
491
  After checking out the repo, run `bin/setup` to install dependencies. Then, run `rspec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
490
492
 
493
+ The integration tests run against a Kafka instance that is not automatically started from within `rspec`. You can set one up using the provided `docker-compose.yml` by running `docker-compose up`.
494
+
491
495
 
492
496
  ## Contributing
493
497
 
@@ -496,9 +500,9 @@ Bug reports and pull requests are welcome on [GitHub](https://github.com/zendesk
496
500
 
497
501
  ## Support and Discussion
498
502
 
499
- If you've discovered a bug, please file a [Github issue](https://github.com/zendesk/racecar/issues/new), and make sure to include all the relevant information, including the version of Racecar, ruby-kafka, and Kafka that you're using.
503
+ If you've discovered a bug, please file a [Github issue](https://github.com/zendesk/racecar/issues/new), and make sure to include all the relevant information, including the version of Racecar, rdkafka-ruby, and Kafka that you're using.
500
504
 
501
- If you have other questions, or would like to discuss best practises, how to contribute to the project, or any other ruby-kafka related topic, [join our Slack team](https://ruby-kafka-slack.herokuapp.com/)!
505
+ If you have other questions, or would like to discuss best practices or how to contribute to the project, [join our Slack team](https://ruby-kafka-slack.herokuapp.com/)!
502
506
 
503
507
 
504
508
  ## Copyright and license
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "bundler/gem_tasks"
2
4
  require "rspec/core/rake_task"
3
5
 
@@ -0,0 +1,32 @@
1
+ version: '2'
2
+ services:
3
+ zookeeper:
4
+ image: confluentinc/cp-zookeeper:5.5.1
5
+ hostname: zookeeper
6
+ container_name: zookeeper
7
+ ports:
8
+ - "2181:2181"
9
+ environment:
10
+ ZOOKEEPER_CLIENT_PORT: 2181
11
+ ZOOKEEPER_TICK_TIME: 2000
12
+
13
+ broker:
14
+ image: confluentinc/cp-kafka:5.5.1
15
+ hostname: broker
16
+ container_name: broker
17
+ depends_on:
18
+ - zookeeper
19
+ ports:
20
+ - "29092:29092"
21
+ - "9092:9092"
22
+ - "9101:9101"
23
+ environment:
24
+ KAFKA_BROKER_ID: 1
25
+ KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
26
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
27
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
28
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
29
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
30
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
31
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
32
+ KAFKA_JMX_PORT: 9101
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  class BatchConsumer < Racecar::Consumer
2
4
  subscribes_to "messages", start_from_beginning: false
3
5
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  class CatConsumer < Racecar::Consumer
2
4
  subscribes_to "messages", start_from_beginning: false
3
5
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  class ProducingConsumer < Racecar::Consumer
2
4
  subscribes_to "messages", start_from_beginning: false
3
5
 
@@ -3,19 +3,42 @@
3
3
  require "racecar"
4
4
  require "racecar/cli"
5
5
 
6
- begin
7
- Racecar::Cli.main(ARGV)
8
- rescue SignalException => e
9
- # We might receive SIGTERM before our signal handler is installed.
10
- if Signal.signame(e.signo) == "TERM"
11
- exit(0)
12
- else
13
- raise
6
+ module Racecar
7
+ class << self
8
+ def start(argv)
9
+ Cli.main(argv)
10
+ rescue SignalException => e
11
+ # We might receive SIGTERM before our signal handler is installed.
12
+ if Signal.signame(e.signo) == "TERM"
13
+ exit(0)
14
+ else
15
+ raise
16
+ end
17
+ rescue SystemExit
18
+ raise
19
+ rescue Exception => e
20
+ $stderr.puts "=> Crashed: #{exception_with_causes(e)}\n#{e.backtrace.join("\n")}"
21
+
22
+ Racecar.config.error_handler.call(e)
23
+
24
+ exit(1)
25
+ else
26
+ exit(0)
27
+ end
28
+
29
+ private
30
+
31
+ def exception_with_causes(e)
32
+ result = +"#{e.class}: #{e}"
33
+ if e.cause
34
+ result << "\n"
35
+ result << "--- Caused by: ---\n"
36
+ result << exception_with_causes(e.cause)
37
+ end
38
+ result
39
+ end
14
40
  end
15
- rescue
16
- # Exceptions are printed to STDERR and sent to the error handler
17
- # in `Racecar::Cli#run`, so we don't need to do anything here.
18
- exit(1)
19
- else
20
- exit(0)
21
41
  end
42
+
43
+ # Start your engines!
44
+ Racecar.start(ARGV)
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # only needed when ruby < 2.4 and not using active support
2
4
 
3
5
  unless {}.respond_to? :compact
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  module Generators
3
5
  class ConsumerGenerator < Rails::Generators::NamedBase
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  module Generators
3
5
  class InstallGenerator < Rails::Generators::Base
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "logger"
2
4
 
3
5
  require "racecar/instrumenter"
@@ -6,6 +8,7 @@ require "racecar/consumer"
6
8
  require "racecar/consumer_set"
7
9
  require "racecar/runner"
8
10
  require "racecar/config"
11
+ require "racecar/version"
9
12
  require "ensure_hash_compact"
10
13
 
11
14
  module Racecar
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "optparse"
2
4
  require "logger"
3
5
  require "fileutils"
@@ -6,8 +8,10 @@ require "racecar/daemon"
6
8
 
7
9
  module Racecar
8
10
  class Cli
9
- def self.main(args)
10
- new(args).run
11
+ class << self
12
+ def main(args)
13
+ new(args).run
14
+ end
11
15
  end
12
16
 
13
17
  def initialize(args)
@@ -16,10 +20,6 @@ module Racecar
16
20
  @consumer_name = args.first or raise Racecar::Error, "no consumer specified"
17
21
  end
18
22
 
19
- def config
20
- Racecar.config
21
- end
22
-
23
23
  def run
24
24
  $stderr.puts "=> Starting Racecar consumer #{consumer_name}..."
25
25
 
@@ -61,18 +61,16 @@ module Racecar
61
61
  processor = consumer_class.new
62
62
 
63
63
  Racecar.run(processor)
64
- rescue => e
65
- $stderr.puts "=> Crashed: #{e.class}: #{e}\n#{e.backtrace.join("\n")}"
66
-
67
- config.error_handler.call(e)
68
-
69
- raise
70
64
  end
71
65
 
72
66
  private
73
67
 
74
68
  attr_reader :consumer_name
75
69
 
70
+ def config
71
+ Racecar.config
72
+ end
73
+
76
74
  def daemonize!
77
75
  daemon = Daemon.new(File.expand_path(config.pidfile))
78
76
 
@@ -102,12 +100,7 @@ module Racecar
102
100
  opts.on("-r", "--require STRING", "Require a library before starting the consumer") do |lib|
103
101
  $LOAD_PATH.unshift(Dir.pwd) unless load_path_modified
104
102
  load_path_modified = true
105
- begin
106
- require lib
107
- rescue => e
108
- $stderr.puts "=> #{lib} failed to load: #{e.message}"
109
- exit
110
- end
103
+ require lib
111
104
  end
112
105
 
113
106
  opts.on("-l", "--log STRING", "Log to the specified file") do |logfile|
@@ -115,13 +108,13 @@ module Racecar
115
108
  end
116
109
 
117
110
  Racecar::Config.variables.each do |variable|
118
- opt_name = "--" << variable.name.to_s.gsub("_", "-")
111
+ opt_name = +"--#{variable.name.to_s.gsub('_', '-')}"
119
112
  opt_name << " #{variable.type.upcase}" unless variable.boolean?
120
113
 
121
114
  desc = variable.description || "N/A"
122
115
 
123
116
  if variable.default
124
- desc << " (default: #{variable.default.inspect})"
117
+ desc += " (default: #{variable.default.inspect})"
125
118
  end
126
119
 
127
120
  opts.on(opt_name, desc) do |value|
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "king_konf"
2
4
 
3
5
  module Racecar
@@ -52,7 +54,7 @@ module Racecar
52
54
  desc "How long to wait when trying to communicate with a Kafka broker"
53
55
  float :socket_timeout, default: 30
54
56
 
55
- desc "How long to allow the Kafka brokers to wait before returning messages"
57
+ desc "How long to allow the Kafka brokers to wait before returning messages (in seconds)"
56
58
  float :max_wait_time, default: 1
57
59
 
58
60
  desc "Maximum amount of data the broker shall return for a Fetch request"
@@ -156,6 +158,10 @@ module Racecar
156
158
 
157
159
  attr_accessor :subscriptions, :logger
158
160
 
161
+ def max_wait_time_ms
162
+ max_wait_time * 1000
163
+ end
164
+
159
165
  def initialize(env: ENV)
160
166
  super(env: env)
161
167
  @error_handler = proc {}
@@ -192,8 +198,8 @@ module Racecar
192
198
  group_id_prefix,
193
199
 
194
200
  # MyFunnyConsumer => my-funny-consumer
195
- consumer_class.name.gsub(/[a-z][A-Z]/) {|str| str[0] << "-" << str[1] }.downcase,
196
- ].compact.join("")
201
+ consumer_class.name.gsub(/[a-z][A-Z]/) { |str| "#{str[0]}-#{str[1]}" }.downcase,
202
+ ].compact.join
197
203
 
198
204
  self.subscriptions = consumer_class.subscriptions
199
205
  self.max_wait_time = consumer_class.max_wait_time || self.max_wait_time
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  class Consumer
3
5
  Subscription = Struct.new(:topic, :start_from_beginning, :max_bytes_per_partition, :additional_config)
@@ -54,7 +56,7 @@ module Racecar
54
56
  protected
55
57
 
56
58
  # https://github.com/appsignal/rdkafka-ruby#producing-messages
57
- def produce(payload, topic:, key:, partition_key: nil, headers: nil, create_time: nil)
59
+ def produce(payload, topic:, key: nil, partition_key: nil, headers: nil, create_time: nil)
58
60
  @delivery_handles ||= []
59
61
  message_size = payload.respond_to?(:bytesize) ? payload.bytesize : 0
60
62
  instrumentation_payload = {
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  class ConsumerSet
3
5
  MAX_POLL_TRIES = 10
@@ -10,49 +12,39 @@ module Racecar
10
12
  @consumers = []
11
13
  @consumer_id_iterator = (0...@config.subscriptions.size).cycle
12
14
 
15
+ @previous_retries = 0
16
+
13
17
  @last_poll_read_nil_message = false
14
18
  end
15
19
 
16
- def poll(timeout_ms)
17
- maybe_select_next_consumer
18
- started_at ||= Time.now
19
- try ||= 0
20
- remain ||= timeout_ms
21
-
22
- msg = remain <= 0 ? nil : current.poll(remain)
23
- rescue Rdkafka::RdkafkaError => e
24
- wait_before_retry_ms = 100 * (2**try) # 100ms, 200ms, 400ms, …
25
- try += 1
26
- raise if try >= MAX_POLL_TRIES || remain <= wait_before_retry_ms
27
-
28
- @logger.error "(try #{try}): Error for topic subscription #{current_subscription}: #{e}"
29
-
30
- case e.code
31
- when :max_poll_exceeded, :transport # -147, -195
32
- reset_current_consumer
33
- end
34
-
35
- remain = remaining_time_ms(timeout_ms, started_at)
36
- raise if remain <= wait_before_retry_ms
37
-
38
- sleep wait_before_retry_ms/1000.0
39
- retry
40
- ensure
41
- @last_poll_read_nil_message = true if msg.nil?
20
+ def poll(max_wait_time_ms = @config.max_wait_time_ms)
21
+ batch_poll(max_wait_time_ms, 1).first
42
22
  end
43
23
 
44
- # XXX: messages are not guaranteed to be from the same partition
45
- def batch_poll(timeout_ms)
46
- @batch_started_at = Time.now
47
- @messages = []
48
- while collect_messages_for_batch? do
49
- remain = remaining_time_ms(timeout_ms, @batch_started_at)
50
- break if remain <= 0
51
- msg = poll(remain)
24
+ # batch_poll collects messages until any of the following occurs:
25
+ # - max_wait_time_ms time has passed
26
+ # - max_messages have been collected
27
+ # - a nil message was polled (end of topic, Kafka stalled, etc.)
28
+ #
29
+ # The messages are from a single topic, but potentially from more than one partition.
30
+ #
31
+ # Any errors during polling are retried in an exponential backoff fashion. If an error
32
+ # occurs, but there is no time left for a backoff and retry, it will return the
33
+ # already collected messages and only retry on the next call.
34
+ def batch_poll(max_wait_time_ms = @config.max_wait_time_ms, max_messages = @config.fetch_messages)
35
+ started_at = Time.now
36
+ remain_ms = max_wait_time_ms
37
+ maybe_select_next_consumer
38
+ messages = []
39
+
40
+ while remain_ms > 0 && messages.size < max_messages
41
+ remain_ms = remaining_time_ms(max_wait_time_ms, started_at)
42
+ msg = poll_with_retries(remain_ms)
52
43
  break if msg.nil?
53
- @messages << msg
44
+ messages << msg
54
45
  end
55
- @messages
46
+
47
+ messages
56
48
  end
57
49
 
58
50
  def store_offset(message)
@@ -123,6 +115,55 @@ module Racecar
123
115
 
124
116
  private
125
117
 
118
+ # polls a single message from the current consumer, retrying errors with exponential
119
+ # backoff. The sleep time is capped by max_wait_time_ms. If there's enough time budget
120
+ # left, it will retry before returning. If there isn't, the retry will only occur on
121
+ # the next call. It tries up to MAX_POLL_TRIES before passing on the exception.
122
+ def poll_with_retries(max_wait_time_ms)
123
+ try ||= @previous_retries
124
+ @previous_retries = 0
125
+ started_at ||= Time.now
126
+ remain_ms = remaining_time_ms(max_wait_time_ms, started_at)
127
+
128
+ wait_ms = try == 0 ? 0 : 50 * (2**try) # 0ms, 100ms, 200ms, 400ms, …
129
+ if wait_ms >= max_wait_time_ms && remain_ms > 1
130
+ @logger.debug "Capping #{wait_ms}ms to #{max_wait_time_ms-1}ms."
131
+ sleep (max_wait_time_ms-1)/1000.0
132
+ remain_ms = 1
133
+ elsif try == 0 && remain_ms == 0
134
+ @logger.debug "No time remains for polling messages. Will try on next call."
135
+ return nil
136
+ elsif wait_ms >= remain_ms
137
+ @logger.error "Only #{remain_ms}ms left, but want to wait for #{wait_ms}ms before poll. Will retry on next call."
138
+ @previous_retries = try
139
+ return nil
140
+ elsif wait_ms > 0
141
+ sleep wait_ms/1000.0
142
+ remain_ms -= wait_ms
143
+ end
144
+
145
+ poll_current_consumer(remain_ms)
146
+ rescue Rdkafka::RdkafkaError => e
147
+ try += 1
148
+ @instrumenter.instrument("poll_retry", try: try, rdkafka_time_limit: remain_ms, exception: e)
149
+ @logger.error "(try #{try}/#{MAX_POLL_TRIES}): Error for topic subscription #{current_subscription}: #{e}"
150
+ raise if try >= MAX_POLL_TRIES
151
+ retry
152
+ end
153
+
154
+ # polls a message for the current consumer, handling any API edge cases.
155
+ def poll_current_consumer(max_wait_time_ms)
156
+ msg = current.poll(max_wait_time_ms)
157
+ rescue Rdkafka::RdkafkaError => e
158
+ case e.code
159
+ when :max_poll_exceeded, :transport # -147, -195
160
+ reset_current_consumer
161
+ end
162
+ raise
163
+ ensure
164
+ @last_poll_read_nil_message = msg.nil?
165
+ end
166
+
126
167
  def find_consumer_by(topic, partition)
127
168
  each do |consumer|
128
169
  tpl = consumer.assignment.to_h
@@ -140,7 +181,12 @@ module Racecar
140
181
  end
141
182
 
142
183
  def reset_current_consumer
143
- @consumers[@consumer_id_iterator.peek] = nil
184
+ current_consumer_id = @consumer_id_iterator.peek
185
+ @logger.info "Resetting consumer with id: #{current_consumer_id}"
186
+
187
+ consumer = @consumers[current_consumer_id]
188
+ consumer.close unless consumer.nil?
189
+ @consumers[current_consumer_id] = nil
144
190
  end
145
191
 
146
192
  def maybe_select_next_consumer
@@ -160,11 +206,6 @@ module Racecar
160
206
  @logger.debug "Nothing to commit."
161
207
  end
162
208
 
163
- def collect_messages_for_batch?
164
- @messages.size < @config.fetch_messages &&
165
- (Time.now - @batch_started_at) < @config.max_wait_time
166
- end
167
-
168
209
  def rdkafka_config(subscription)
169
210
  # https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
170
211
  config = {
@@ -176,7 +217,7 @@ module Racecar
176
217
  "fetch.max.bytes" => @config.max_bytes,
177
218
  "message.max.bytes" => subscription.max_bytes_per_partition,
178
219
  "fetch.min.bytes" => @config.fetch_min_bytes,
179
- "fetch.wait.max.ms" => @config.max_wait_time * 1000,
220
+ "fetch.wait.max.ms" => @config.max_wait_time_ms,
180
221
  "group.id" => @config.group_id,
181
222
  "heartbeat.interval.ms" => @config.heartbeat_interval * 1000,
182
223
  "max.poll.interval.ms" => @config.max_poll_interval * 1000,
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "optparse"
2
4
  require "racecar/rails_config_file_loader"
3
5
  require "racecar/daemon"
@@ -98,7 +100,8 @@ module Racecar
98
100
  "client.id": Racecar.config.client_id,
99
101
  }.merge(Racecar.config.rdkafka_producer)).producer
100
102
 
101
- producer.produce(payload: message.value, key: message.key, topic: message.topic).wait
103
+ handle = producer.produce(payload: message.value, key: message.key, topic: message.topic)
104
+ handle.wait(max_wait_timeout: 5)
102
105
 
103
106
  $stderr.puts "=> Delivered message to Kafka cluster"
104
107
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  class Daemon
3
5
  attr_reader :pidfile
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  begin
2
4
  require "datadog/statsd"
3
5
  rescue LoadError
@@ -155,6 +157,15 @@ module Racecar
155
157
  end
156
158
  end
157
159
 
160
+ def poll_retry(event)
161
+ tags = {
162
+ client: event.payload.fetch(:client_id),
163
+ group_id: event.payload.fetch(:group_id),
164
+ }
165
+ rdkafka_error_code = event.payload.fetch(:exception).code.to_s.gsub(/\W/, '')
166
+ increment("consumer.poll.rdkafka_error.#{rdkafka_error_code}", tags: tags)
167
+ end
168
+
158
169
  def main_loop(event)
159
170
  tags = {
160
171
  client: event.payload.fetch(:client_id),
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  ##
3
5
  # Common API for instrumentation to standardize
@@ -11,6 +13,8 @@ module Racecar
11
13
  @default_payload = default_payload
12
14
 
13
15
  @backend = if defined?(ActiveSupport::Notifications)
16
+ # ActiveSupport needs `concurrent-ruby` but doesn't `require` it.
17
+ require 'concurrent/utility/monotonic_time'
14
18
  ActiveSupport::Notifications
15
19
  else
16
20
  NullInstrumenter
@@ -1,11 +1,16 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "forwardable"
2
4
 
3
5
  module Racecar
4
6
  class Message
5
7
  extend Forwardable
6
8
 
7
- def initialize(rdkafka_message)
9
+ attr_reader :retries_count
10
+
11
+ def initialize(rdkafka_message, retries_count: nil)
8
12
  @rdkafka_message = rdkafka_message
13
+ @retries_count = retries_count
9
14
  end
10
15
 
11
16
  def_delegators :@rdkafka_message, :topic, :partition, :offset, :key, :headers
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  # Ignores all instrumentation events.
3
5
  class NullInstrumenter
@@ -1,8 +1,12 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  class Pause
5
+ attr_reader :pauses_count
6
+
3
7
  def initialize(timeout: nil, max_timeout: nil, exponential_backoff: false)
4
8
  @started_at = nil
5
- @pauses = 0
9
+ @pauses_count = 0
6
10
  @timeout = timeout
7
11
  @max_timeout = max_timeout
8
12
  @exponential_backoff = exponential_backoff
@@ -11,7 +15,7 @@ module Racecar
11
15
  def pause!
12
16
  @started_at = Time.now
13
17
  @ends_at = @started_at + backoff_interval unless @timeout.nil?
14
- @pauses += 1
18
+ @pauses_count += 1
15
19
  end
16
20
 
17
21
  def resume!
@@ -38,13 +42,13 @@ module Racecar
38
42
  end
39
43
 
40
44
  def reset!
41
- @pauses = 0
45
+ @pauses_count = 0
42
46
  end
43
47
 
44
48
  def backoff_interval
45
49
  return Float::INFINITY if @timeout.nil?
46
50
 
47
- backoff_factor = @exponential_backoff ? 2**@pauses : 1
51
+ backoff_factor = @exponential_backoff ? 2**@pauses_count : 1
48
52
  timeout = backoff_factor * @timeout
49
53
 
50
54
  timeout = @max_timeout if @max_timeout && timeout > @max_timeout
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
4
  module RailsConfigFileLoader
3
5
  def self.load!
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "rdkafka"
2
4
  require "racecar/pause"
3
5
  require "racecar/message"
@@ -65,12 +67,12 @@ module Racecar
65
67
  @instrumenter.instrument("main_loop", instrumentation_payload) do
66
68
  case process_method
67
69
  when :batch then
68
- msg_per_part = consumer.batch_poll(config.max_wait_time).group_by(&:partition)
70
+ msg_per_part = consumer.batch_poll(config.max_wait_time_ms).group_by(&:partition)
69
71
  msg_per_part.each_value do |messages|
70
72
  process_batch(messages)
71
73
  end
72
74
  when :single then
73
- message = consumer.poll(config.max_wait_time)
75
+ message = consumer.poll(config.max_wait_time_ms)
74
76
  process(message) if message
75
77
  end
76
78
  end
@@ -166,14 +168,15 @@ module Racecar
166
168
  }
167
169
 
168
170
  @instrumenter.instrument("start_process_message", instrumentation_payload)
169
- with_pause(message.topic, message.partition, message.offset..message.offset) do
171
+ with_pause(message.topic, message.partition, message.offset..message.offset) do |pause|
170
172
  begin
171
173
  @instrumenter.instrument("process_message", instrumentation_payload) do
172
- processor.process(Racecar::Message.new(message))
174
+ processor.process(Racecar::Message.new(message, retries_count: pause.pauses_count))
173
175
  processor.deliver!
174
176
  consumer.store_offset(message)
175
177
  end
176
178
  rescue => e
179
+ instrumentation_payload[:retries_count] = pause.pauses_count
177
180
  config.error_handler.call(e, instrumentation_payload)
178
181
  raise e
179
182
  end
@@ -194,12 +197,16 @@ module Racecar
194
197
 
195
198
  @instrumenter.instrument("start_process_batch", instrumentation_payload)
196
199
  @instrumenter.instrument("process_batch", instrumentation_payload) do
197
- with_pause(first.topic, first.partition, first.offset..last.offset) do
200
+ with_pause(first.topic, first.partition, first.offset..last.offset) do |pause|
198
201
  begin
199
- processor.process_batch(messages.map {|message| Racecar::Message.new(message) })
202
+ racecar_messages = messages.map do |message|
203
+ Racecar::Message.new(message, retries_count: pause.pauses_count)
204
+ end
205
+ processor.process_batch(racecar_messages)
200
206
  processor.deliver!
201
207
  consumer.store_offset(messages.last)
202
208
  rescue => e
209
+ instrumentation_payload[:retries_count] = pause.pauses_count
203
210
  config.error_handler.call(e, instrumentation_payload)
204
211
  raise e
205
212
  end
@@ -208,17 +215,17 @@ module Racecar
208
215
  end
209
216
 
210
217
  def with_pause(topic, partition, offsets)
211
- return yield if config.pause_timeout == 0
218
+ pause = pauses[topic][partition]
219
+ return yield pause if config.pause_timeout == 0
212
220
 
213
221
  begin
214
- yield
222
+ yield pause
215
223
  # We've successfully processed a batch from the partition, so we can clear the pause.
216
224
  pauses[topic][partition].reset!
217
225
  rescue => e
218
226
  desc = "#{topic}/#{partition}"
219
227
  logger.error "Failed to process #{desc} at #{offsets}: #{e}"
220
228
 
221
- pause = pauses[topic][partition]
222
229
  logger.warn "Pausing partition #{desc} for #{pause.backoff_interval} seconds"
223
230
  consumer.pause(topic, partition, offsets.first)
224
231
  pause.pause!
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Racecar
2
- VERSION = "2.0.0.beta5"
4
+ VERSION = "2.2.0"
3
5
  end
@@ -20,10 +20,11 @@ Gem::Specification.new do |spec|
20
20
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
21
21
  spec.require_paths = ["lib"]
22
22
 
23
- spec.add_runtime_dependency "king_konf", "~> 0.3.7"
24
- spec.add_runtime_dependency "rdkafka", "~> 0.8.0.beta.1"
23
+ spec.add_runtime_dependency "king_konf", "~> 1.0.0"
24
+ spec.add_runtime_dependency "rdkafka", "~> 0.8.0"
25
25
 
26
26
  spec.add_development_dependency "bundler", [">= 1.13", "< 3"]
27
+ spec.add_development_dependency "pry"
27
28
  spec.add_development_dependency "rake", "> 10.0"
28
29
  spec.add_development_dependency "rspec", "~> 3.0"
29
30
  spec.add_development_dependency "timecop"
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: racecar
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.0.0.beta5
4
+ version: 2.2.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Daniel Schierbeck
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: exe
11
11
  cert_chain: []
12
- date: 2020-05-04 00:00:00.000000000 Z
12
+ date: 2021-01-29 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: king_konf
@@ -17,28 +17,28 @@ dependencies:
17
17
  requirements:
18
18
  - - "~>"
19
19
  - !ruby/object:Gem::Version
20
- version: 0.3.7
20
+ version: 1.0.0
21
21
  type: :runtime
22
22
  prerelease: false
23
23
  version_requirements: !ruby/object:Gem::Requirement
24
24
  requirements:
25
25
  - - "~>"
26
26
  - !ruby/object:Gem::Version
27
- version: 0.3.7
27
+ version: 1.0.0
28
28
  - !ruby/object:Gem::Dependency
29
29
  name: rdkafka
30
30
  requirement: !ruby/object:Gem::Requirement
31
31
  requirements:
32
32
  - - "~>"
33
33
  - !ruby/object:Gem::Version
34
- version: 0.8.0.beta.1
34
+ version: 0.8.0
35
35
  type: :runtime
36
36
  prerelease: false
37
37
  version_requirements: !ruby/object:Gem::Requirement
38
38
  requirements:
39
39
  - - "~>"
40
40
  - !ruby/object:Gem::Version
41
- version: 0.8.0.beta.1
41
+ version: 0.8.0
42
42
  - !ruby/object:Gem::Dependency
43
43
  name: bundler
44
44
  requirement: !ruby/object:Gem::Requirement
@@ -59,6 +59,20 @@ dependencies:
59
59
  - - "<"
60
60
  - !ruby/object:Gem::Version
61
61
  version: '3'
62
+ - !ruby/object:Gem::Dependency
63
+ name: pry
64
+ requirement: !ruby/object:Gem::Requirement
65
+ requirements:
66
+ - - ">="
67
+ - !ruby/object:Gem::Version
68
+ version: '0'
69
+ type: :development
70
+ prerelease: false
71
+ version_requirements: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - ">="
74
+ - !ruby/object:Gem::Version
75
+ version: '0'
62
76
  - !ruby/object:Gem::Dependency
63
77
  name: rake
64
78
  requirement: !ruby/object:Gem::Requirement
@@ -151,17 +165,20 @@ executables:
151
165
  extensions: []
152
166
  extra_rdoc_files: []
153
167
  files:
154
- - ".github/workflows/rspec.yml"
168
+ - ".circleci/config.yml"
169
+ - ".github/workflows/ci.yml"
155
170
  - ".gitignore"
156
171
  - ".rspec"
157
172
  - CHANGELOG.md
158
173
  - Gemfile
174
+ - Gemfile.lock
159
175
  - LICENSE.txt
160
176
  - Procfile
161
177
  - README.md
162
178
  - Rakefile
163
179
  - bin/console
164
180
  - bin/setup
181
+ - docker-compose.yml
165
182
  - examples/batch_consumer.rb
166
183
  - examples/cat_consumer.rb
167
184
  - examples/producing_consumer.rb
@@ -203,12 +220,11 @@ required_ruby_version: !ruby/object:Gem::Requirement
203
220
  version: '0'
204
221
  required_rubygems_version: !ruby/object:Gem::Requirement
205
222
  requirements:
206
- - - ">"
223
+ - - ">="
207
224
  - !ruby/object:Gem::Version
208
- version: 1.3.1
225
+ version: '0'
209
226
  requirements: []
210
- rubyforge_project:
211
- rubygems_version: 2.7.6
227
+ rubygems_version: 3.1.2
212
228
  signing_key:
213
229
  specification_version: 4
214
230
  summary: A framework for running Kafka consumers
@@ -1,24 +0,0 @@
1
- name: Execute Specs
2
-
3
- on: [push]
4
-
5
- jobs:
6
- rspec:
7
-
8
- runs-on: ubuntu-latest
9
-
10
- strategy:
11
- matrix:
12
- ruby-version: ["2.5.x", "2.6.x"]
13
-
14
- steps:
15
- - uses: actions/checkout@v1
16
- - name: Set up Ruby 2.6
17
- uses: actions/setup-ruby@v1
18
- with:
19
- ruby-version: ${{ matrix.ruby-version }}
20
- - name: Build and test with RSpec
21
- run: |
22
- gem install bundler --no-document
23
- bundle install --jobs 4 --retry 3
24
- bundle exec rspec --format documentation --require spec_helper --color