karafka 2.0.15 → 2.0.16

Files changed (105)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/.rspec +2 -0
  5. data/CHANGELOG.md +78 -0
  6. data/Gemfile.lock +14 -14
  7. data/LICENSE +1 -1
  8. data/README.md +2 -1
  9. data/bin/integrations +3 -2
  10. data/bin/rspecs +4 -0
  11. data/config/errors.yml +10 -4
  12. data/lib/active_job/karafka.rb +0 -6
  13. data/lib/karafka/active_job/consumer.rb +1 -0
  14. data/lib/karafka/admin.rb +2 -2
  15. data/lib/karafka/base_consumer.rb +31 -21
  16. data/lib/karafka/connection/listener.rb +6 -4
  17. data/lib/karafka/contracts/consumer_group.rb +0 -14
  18. data/lib/karafka/contracts/{consumer_group_topic.rb → topic.rb} +2 -3
  19. data/lib/karafka/errors.rb +6 -4
  20. data/lib/karafka/instrumentation/logger_listener.rb +25 -11
  21. data/lib/karafka/instrumentation/notifications.rb +2 -0
  22. data/lib/karafka/instrumentation/vendors/datadog/dashboard.json +1 -1
  23. data/lib/karafka/instrumentation/vendors/datadog/listener.rb +37 -32
  24. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +153 -0
  25. data/lib/karafka/pro/active_job/consumer.rb +3 -1
  26. data/lib/karafka/pro/active_job/dispatcher.rb +3 -1
  27. data/lib/karafka/pro/active_job/job_options_contract.rb +3 -1
  28. data/lib/karafka/pro/base_consumer.rb +3 -85
  29. data/lib/karafka/pro/loader.rb +31 -24
  30. data/lib/karafka/pro/performance_tracker.rb +3 -1
  31. data/lib/karafka/pro/processing/coordinator.rb +16 -1
  32. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +3 -1
  33. data/lib/karafka/pro/processing/jobs_builder.rb +3 -1
  34. data/lib/karafka/pro/processing/partitioner.rb +3 -1
  35. data/lib/karafka/pro/processing/scheduler.rb +3 -1
  36. data/lib/karafka/pro/processing/strategies/aj_dlq_lrj_mom.rb +40 -0
  37. data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +62 -0
  38. data/lib/karafka/pro/processing/strategies/aj_lrj_mom.rb +35 -0
  39. data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +69 -0
  40. data/lib/karafka/pro/processing/strategies/aj_mom.rb +33 -0
  41. data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +58 -0
  42. data/lib/karafka/pro/processing/strategies/base.rb +26 -0
  43. data/lib/karafka/pro/processing/strategies/default.rb +69 -0
  44. data/lib/karafka/pro/processing/strategies/dlq.rb +88 -0
  45. data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +64 -0
  46. data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +60 -0
  47. data/lib/karafka/pro/processing/strategies/dlq_mom.rb +58 -0
  48. data/lib/karafka/pro/processing/strategies/lrj.rb +76 -0
  49. data/lib/karafka/pro/processing/strategies/lrj_mom.rb +68 -0
  50. data/lib/karafka/pro/processing/strategies/lrj_vp.rb +33 -0
  51. data/lib/karafka/pro/processing/strategies/mom.rb +43 -0
  52. data/lib/karafka/pro/processing/strategies/vp.rb +32 -0
  53. data/lib/karafka/pro/processing/strategy_selector.rb +58 -0
  54. data/lib/karafka/pro/{contracts → routing/features}/base.rb +8 -5
  55. data/lib/karafka/pro/routing/features/dead_letter_queue/contract.rb +49 -0
  56. data/lib/karafka/pro/routing/{builder_extensions.rb → features/dead_letter_queue.rb} +9 -12
  57. data/lib/karafka/pro/routing/features/long_running_job/config.rb +28 -0
  58. data/lib/karafka/pro/routing/features/long_running_job/contract.rb +37 -0
  59. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +42 -0
  60. data/lib/karafka/pro/routing/features/long_running_job.rb +28 -0
  61. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +30 -0
  62. data/lib/karafka/pro/routing/features/virtual_partitions/contract.rb +69 -0
  63. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +56 -0
  64. data/lib/karafka/pro/routing/features/virtual_partitions.rb +27 -0
  65. data/lib/karafka/processing/coordinator.rb +1 -1
  66. data/lib/karafka/processing/executor.rb +6 -0
  67. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +44 -0
  68. data/lib/karafka/processing/strategies/aj_mom.rb +21 -0
  69. data/lib/karafka/processing/strategies/base.rb +37 -0
  70. data/lib/karafka/processing/strategies/default.rb +52 -0
  71. data/lib/karafka/processing/strategies/dlq.rb +77 -0
  72. data/lib/karafka/processing/strategies/dlq_mom.rb +42 -0
  73. data/lib/karafka/processing/strategies/mom.rb +29 -0
  74. data/lib/karafka/processing/strategy_selector.rb +30 -0
  75. data/lib/karafka/railtie.rb +9 -8
  76. data/lib/karafka/routing/builder.rb +6 -0
  77. data/lib/karafka/routing/features/active_job/builder.rb +33 -0
  78. data/lib/karafka/routing/features/active_job/config.rb +15 -0
  79. data/lib/karafka/routing/features/active_job/contract.rb +41 -0
  80. data/lib/karafka/routing/features/active_job/topic.rb +33 -0
  81. data/lib/karafka/routing/features/active_job.rb +13 -0
  82. data/lib/karafka/routing/features/base/expander.rb +53 -0
  83. data/lib/karafka/routing/features/base.rb +34 -0
  84. data/lib/karafka/routing/features/dead_letter_queue/config.rb +19 -0
  85. data/lib/karafka/routing/features/dead_letter_queue/contract.rb +40 -0
  86. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +40 -0
  87. data/lib/karafka/routing/features/dead_letter_queue.rb +16 -0
  88. data/lib/karafka/routing/features/manual_offset_management/config.rb +15 -0
  89. data/lib/karafka/routing/features/manual_offset_management/contract.rb +24 -0
  90. data/lib/karafka/routing/features/manual_offset_management/topic.rb +35 -0
  91. data/lib/karafka/routing/features/manual_offset_management.rb +18 -0
  92. data/lib/karafka/routing/topic.rb +2 -10
  93. data/lib/karafka/server.rb +4 -2
  94. data/lib/karafka/setup/attributes_map.rb +5 -0
  95. data/lib/karafka/setup/config.rb +4 -4
  96. data/lib/karafka/time_trackers/pause.rb +21 -12
  97. data/lib/karafka/version.rb +1 -1
  98. data/lib/karafka.rb +7 -11
  99. data.tar.gz.sig +0 -0
  100. metadata +57 -9
  101. metadata.gz.sig +0 -0
  102. data/lib/karafka/active_job/routing/extensions.rb +0 -33
  103. data/lib/karafka/pro/contracts/consumer_group.rb +0 -34
  104. data/lib/karafka/pro/contracts/consumer_group_topic.rb +0 -69
  105. data/lib/karafka/pro/routing/topic_extensions.rb +0 -74
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 79c991a988c67b681c8bdf16d06d61b3781be5bbc070d23a1258ea0152058d41
-  data.tar.gz: 478d2155761e7fa4bb6b766a5fd598f3524b5810edddc36229185bb9ce396c3c
+  metadata.gz: 8cc0ac8b318d7c40e8974e2e26753c68cc5f611bbcd0e4fbea94e3eb275002dd
+  data.tar.gz: db638edaecc662ac8a0f7d627f4bcae6e1e1e83500a49c4dfa4a4f32ee8f78c9
 SHA512:
-  metadata.gz: d9a3ba7e201947f0ced657eb61aa8f4af348f70a85167a9194f34039d070248551aa3a09d270930ccac399d774fc89305a4f2bc36da715066cd789997fbb40d9
-  data.tar.gz: b5cf1015aa851c7ab78fa89e2b2317085c18f24c8b5a03f5b82c8c3c7ac2ef95fe6a1888defc97f62492c107c2b0b2cd1b72bbe5811fb99dd92e39e93f912608
+  metadata.gz: 6875e7e152b4150f397f3830c828412e0477409483c81e714525161e5263cff686c0cb9b46a6c11cb5b914dc51087e4bac95d5dcdc0e25c7233825b363ed7e6c
+  data.tar.gz: 1f35d410350dc3fbe97462e979f3df832484df1f1f5119e16fe2c28210d01a05e1d3e086c73e1a3b70ba3c6cae9a25dd50e0907ca454b68c9166efcabd22f972
checksums.yaml.gz.sig CHANGED
Binary file
data/.github/workflows/ci.yml CHANGED
@@ -81,7 +81,7 @@ jobs:
       - name: Run all specs
         env:
           GITHUB_COVERAGE: ${{matrix.coverage}}
-        run: bundle exec rspec
+        run: bin/rspecs
 
   integrations:
     runs-on: ubuntu-latest
data/.rspec CHANGED
@@ -1,3 +1,5 @@
 --require spec_helper
 # Integration specs run with their one framework, not via RSpec
 --exclude-pattern "spec/integrations/**/*_spec.rb"
+# Do not run pro at the same time as default specs as it would cause state conflicts
+--tag ~type:pro
data/CHANGELOG.md CHANGED
@@ -1,5 +1,83 @@
 # Karafka framework changelog
 
+## 2.0.16 (2022-11-09)
+- **[Breaking]** Disable the root `manual_offset_management` setting and require it to be configured per topic. This is part of "topic features" configuration extraction for better code organization.
+- **[Feature]** Introduce **Dead Letter Queue** feature and Pro **Enhanced Dead Letter Queue** feature
+- [Improvement] Align attributes available in the instrumentation bus for listener related events.
+- [Improvement] Include consumer group id in consumption related events (#1093)
+- [Improvement] Delegate pro components loading to Zeitwerk
+- [Improvement] Include `Datadog::LoggerListener` for tracking logger data with DataDog (@bruno-b-martins)
+- [Improvement] Include `seek_offset` in the `consumer.consume.error` event payload (#1113)
+- [Refactor] Remove unused logger listener event handler.
+- [Refactor] Internal refactoring of routing validations flow.
+- [Refactor] Reorganize how routing related features are represented internally to simplify features management.
+- [Refactor] Extract supported features combinations processing flow into separate strategies.
+- [Refactor] Auto-create topics in the integration specs based on the defined routing
+- [Refactor] Auto-inject Pro components via composition instead of requiring to use `Karafka::Pro::BaseConsumer` (#1116)
+- [Fix] Fix a case where routing tags would not be injected when given routing definition would not be used with a block
+- [Fix] Fix a case where using `#active_job_topic` without extra block options would cause `manual_offset_management` to stay false.
+- [Fix] Fix a case when upon Pro ActiveJob usage with Virtual Partitions, correct offset would not be stored
+- [Fix] Fix a case where upon Virtual Partitions usage, same underlying real partition would be resumed several times.
+- [Fix] Fix LRJ enqueuing pause increases the coordinator counter (#115)
+- [Fix] Release `ActiveRecord` connection to the pool after the work in non-dev envs (#1130)
+- [Fix] Fix a case where post-initialization shutdown would not initiate shutdown procedures.
+- [Fix] Prevent Karafka from committing offsets twice upon shutdown.
+- [Fix] Fix for a case where fast consecutive stop signaling could hang the stopping listeners.
+- [Specs] Split specs into regular and pro to simplify how resources are loaded
+- [Specs] Add specs to ensure, that all the Pro components have a proper per-file license (#1099)
+
+
+### Upgrade notes
+
+1. Remove the `manual_offset_management` setting from the main config if you use it:
+
+```ruby
+class KarafkaApp < Karafka::App
+  setup do |config|
+    # ...
+
+    # This line needs to be removed:
+    config.manual_offset_management = true
+  end
+end
+```
+
+2. Set the `manual_offset_management` feature flag per each topic where you want to use it in the routing. Don't set it for topics where you want the default offset management strategy to be used.
+
+```ruby
+class KarafkaApp < Karafka::App
+  routes.draw do
+    consumer_group :group_name do
+      topic :example do
+        consumer ExampleConsumer
+        manual_offset_management true
+      end
+
+      topic :example2 do
+        consumer ExampleConsumer2
+        manual_offset_management true
+      end
+    end
+  end
+end
+```
+
+3. If you were using code to restart dead connections similar to this:
+
+```ruby
+class ActiveRecordConnectionsCleaner
+  def on_error_occurred(event)
+    return unless event[:error].is_a?(ActiveRecord::StatementInvalid)
+
+    ::ActiveRecord::Base.clear_active_connections!
+  end
+end
+
+Karafka.monitor.subscribe(ActiveRecordConnectionsCleaner.new)
+```
+
+It **should** be removed. This code is **no longer needed**.
+
 ## 2.0.15 (2022-10-20)
 - Sanitize admin config prior to any admin action.
 - Make messages partitioner outcome for virtual partitions consistently distributed in regards to concurrency.
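
Editor's note: the Dead Letter Queue feature announced above is configured per topic in the routing, like `manual_offset_management` in the upgrade notes. A minimal sketch, assuming the `topic` and `max_retries` settings visible in the `config/errors.yml` validations further down; `OrdersStatesConsumer` and the topic names are illustrative, and https://karafka.io/docs/Dead-Letter-Queue/ remains the authoritative reference:

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders_states do
      consumer OrdersStatesConsumer

      # After 2 failed attempts on the same message, dispatch it to the
      # 'dead_messages' topic and continue with the next offset
      dead_letter_queue(topic: 'dead_messages', max_retries: 2)
    end
  end
end
```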
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (2.0.15)
+    karafka (2.0.16)
       karafka-core (>= 2.0.2, < 3.0.0)
       rdkafka (>= 0.12)
       thor (>= 0.20)
@@ -39,19 +39,19 @@ GEM
       ffi (~> 1.15)
       mini_portile2 (~> 2.6)
       rake (> 12)
-    rspec (3.11.0)
-      rspec-core (~> 3.11.0)
-      rspec-expectations (~> 3.11.0)
-      rspec-mocks (~> 3.11.0)
-    rspec-core (3.11.0)
-      rspec-support (~> 3.11.0)
-    rspec-expectations (3.11.1)
+    rspec (3.12.0)
+      rspec-core (~> 3.12.0)
+      rspec-expectations (~> 3.12.0)
+      rspec-mocks (~> 3.12.0)
+    rspec-core (3.12.0)
+      rspec-support (~> 3.12.0)
+    rspec-expectations (3.12.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.11.0)
-    rspec-mocks (3.11.1)
+      rspec-support (~> 3.12.0)
+    rspec-mocks (3.12.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.11.0)
-    rspec-support (3.11.1)
+      rspec-support (~> 3.12.0)
+    rspec-support (3.12.0)
     simplecov (0.21.2)
       docile (~> 1.1)
       simplecov-html (~> 0.11)
@@ -65,7 +65,7 @@ GEM
       karafka-core (>= 2.0.2, < 3.0.0)
       rdkafka (>= 0.10)
       zeitwerk (~> 2.3)
-    zeitwerk (2.6.1)
+    zeitwerk (2.6.6)
 
 PLATFORMS
   x86_64-linux
@@ -79,4 +79,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.3.22
+   2.3.24
data/LICENSE CHANGED
@@ -12,6 +12,6 @@ and their usage requires commercial license agreement.
 By sending a pull request to the pro components, you are agreeing to transfer the copyright of your
 code to Maciej Mensfeld.
 
-You can find the commercial license in LICENSE-COM.
+You can find the commercial license in LICENSE-COMM.
 
 Please see https://karafka.io for purchasing options.
data/README.md CHANGED
@@ -9,8 +9,9 @@
 Karafka is a Ruby and Rails multi-threaded efficient Kafka processing framework that:
 
 - Supports parallel processing in [multiple threads](https://karafka.io/docs/Concurrency-and-multithreading) (also for a [single topic partition](https://karafka.io/docs/Pro-Virtual-Partitions) work)
-- Has [ActiveJob backend](https://karafka.io/docs/Active-Job) support (including [ordered jobs](https://karafka.io/docs/Pro-Enhanced-Active-Job#ordered-jobs))
 - [Automatically integrates](https://karafka.io/docs/Integrating-with-Ruby-on-Rails-and-other-frameworks#integrating-with-ruby-on-rails) with Ruby on Rails
+- Has [ActiveJob backend](https://karafka.io/docs/Active-Job) support (including [ordered jobs](https://karafka.io/docs/Pro-Enhanced-Active-Job#ordered-jobs))
+- Has a seamless [Dead Letter Queue](karafka.io/docs/Dead-Letter-Queue/) functionality built-in
 - Supports in-development [code reloading](https://karafka.io/docs/Auto-reload-of-code-changes-in-development)
 - Is powered by [librdkafka](https://github.com/edenhill/librdkafka) (the Apache Kafka C/C++ client library)
 - Has an out-of the box [StatsD/DataDog monitoring](https://karafka.io/docs/Monitoring-and-logging) with a dashboard template.
data/bin/integrations CHANGED
@@ -21,9 +21,10 @@ require 'etc'
 ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../')))
 
 # How many child processes with integration specs do we want to run in parallel
-# When the value is high, there's a problem with thread allocation on Github CI, tht is why
+# When the value is high, there's a problem with thread allocation on Github CI, that is why
 # we limit it. Locally we can run a lot of those, as many of them have sleeps and do not use a lot
-# of CPU
+# of CPU. Locally we also cannot go beyond certain limit due to how often and how many topics we
+# create in Kafka. With an overloaded system, we start getting timeouts.
 CONCURRENCY = ENV.key?('CI') ? 4 : Etc.nprocessors * 2
 
 # How may bytes do we want to keep from the stdout in the buffer for when we need to print it
data/bin/rspecs ADDED
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+SPECS_TYPE=regular bundle exec rspec --tag ~type:pro
+SPECS_TYPE=pro bundle exec rspec --tag type:pro
data/config/errors.yml CHANGED
@@ -27,15 +27,20 @@ en:
       missing: needs to be present
       consumer_groups_inclusion: Unknown consumer group
 
-    consumer_group_topic:
+    topic:
       missing: needs to be present
       name_format: 'needs to be a string with a Kafka accepted format'
       deserializer_format: needs to be present
-      manual_offset_management_format: needs to be either true or false
       consumer_format: needs to be present
       id_format: 'needs to be a string with a Kafka accepted format'
       initial_offset_format: needs to be either earliest or latest
       subscription_group_format: must be nil or a non-empty string
+      manual_offset_management.active_format: needs to be either true or false
+      consumer_active_job_missing: ActiveJob needs to be available
+      manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
+      dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
+      dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
+      dead_letter_queue.active_format: needs to be either true or false
 
     consumer_group:
       missing: needs to be present
@@ -53,8 +58,9 @@ en:
       missing: needs to be present
       id_format: needs to be a String
 
-    pro_consumer_group_topic:
-      consumer_format: needs to inherit from Karafka::Pro::BaseConsumer and not Karafka::Consumer
+    pro_topic:
       virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
       virtual_partitions.max_partitions_format: needs to be equl or more than 1
       manual_offset_management_not_with_virtual_partitions: cannot be used together with Virtual Partitions
+      long_running_job.active_format: needs to be either true or false
+      dead_letter_queue_not_with_virtual_partitions: cannot be used together with Virtual Partitions
data/lib/active_job/karafka.rb CHANGED
@@ -10,12 +10,6 @@ begin
     module Karafka
     end
   end
-
-  # We extend routing builder by adding a simple wrapper for easier jobs topics defining
-  # This needs to be extended here as it is going to be used in karafka routes, hence doing that in
-  # the railtie initializer would be too late
-  ::Karafka::Routing::Builder.include ::Karafka::ActiveJob::Routing::Extensions
-  ::Karafka::Routing::Proxy.include ::Karafka::ActiveJob::Routing::Extensions
 rescue LoadError
   # We extend ActiveJob stuff in the railtie
 end
data/lib/karafka/active_job/consumer.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 module Karafka
+  # Namespace for all the ActiveJob related things from within Karafka
   module ActiveJob
     # This is the consumer for ActiveJob that eats the messages enqueued with it one after another.
     # It marks the offset after each message, so we make sure, none of the jobs is executed twice
data/lib/karafka/admin.rb CHANGED
@@ -21,7 +21,7 @@ module Karafka
       with_admin do |admin|
         admin.create_topic(name, partitions, replication_factor, topic_config)
 
-        sleep(0.1) until topics_names.include?(name)
+        sleep(0.2) until topics_names.include?(name)
       end
     end
 
@@ -32,7 +32,7 @@ module Karafka
       with_admin do |admin|
         admin.delete_topic(name)
 
-        sleep(0.1) while topics_names.include?(name)
+        sleep(0.2) while topics_names.include?(name)
       end
     end
 
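Editor's note: `create_topic` and `delete_topic` are blocking helpers, polling the cluster (now every 0.2s instead of 0.1s) until the change is visible. A usage sketch with illustrative arguments (name, partitions count, replication factor):

```ruby
# Blocks until the cluster reports the new topic
Karafka::Admin.create_topic('example_topic', 2, 1)

# Blocks until the topic is fully removed
Karafka::Admin.delete_topic('example_topic')
```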
data/lib/karafka/base_consumer.rb CHANGED
@@ -21,7 +21,16 @@ module Karafka
     # @note This should not be used by the end users as it is part of the lifecycle of things and
     #   not as a part of the public api. This should not perform any extensive operations as it is
     #   blocking and running in the listener thread.
-    def on_before_enqueue; end
+    def on_before_enqueue
+      handle_before_enqueue
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        error: e,
+        caller: self,
+        type: 'consumer.before_enqueue.error'
+      )
+    end
 
     # Can be used to run preparation code in the worker
     #
@@ -32,6 +41,17 @@
     def on_before_consume
       messages.metadata.processed_at = Time.now
       messages.metadata.freeze
+
+      # We run this after the full metadata setup, so we can use all the messages information
+      # if needed
+      handle_before_consume
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        error: e,
+        caller: self,
+        type: 'consumer.before_consume.error'
+      )
     end
 
     # Executes the default consumer flow.
@@ -54,6 +74,7 @@
         'error.occurred',
         error: e,
         caller: self,
+        seek_offset: coordinator.seek_offset,
         type: 'consumer.consume.error'
       )
     ensure
@@ -65,32 +86,21 @@
     # @note This should not be used by the end users as it is part of the lifecycle of things but
     #   not as part of the public api.
     def on_after_consume
-      return if revoked?
-
-      if coordinator.success?
-        coordinator.pause_tracker.reset
-
-        # Mark as consumed only if manual offset management is not on
-        return if topic.manual_offset_management?
-
-        # We use the non-blocking one here. If someone needs the blocking one, can implement it
-        # with manual offset management
-        mark_as_consumed(messages.last)
-      else
-        pause(coordinator.seek_offset)
-      end
+      handle_after_consume
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        error: e,
+        caller: self,
+        type: 'consumer.after_consume.error'
+      )
     end
 
     # Trigger method for running on partition revocation.
     #
     # @private
    def on_revoked
-      # We need to always un-pause the processing in case we have lost a given partition.
-      # Otherwise the underlying librdkafka would not know we may want to continue processing and
-      # the pause could in theory last forever
-      resume
-
-      coordinator.revoke
+      handle_revoked
 
       Karafka.monitor.instrument('consumer.revoked', caller: self) do
         revoked
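
Editor's note: since `seek_offset` is now part of the `consumer.consume.error` payload (see the hunk above), an error subscriber can report where consumption will resume. A minimal sketch using the same method-name subscription convention as the changelog's `ActiveRecordConnectionsCleaner`; `ErrorsListener` is a hypothetical name:

```ruby
class ErrorsListener
  # @param event [Dry::Events::Event] error event details
  def on_error_occurred(event)
    return unless event[:type] == 'consumer.consume.error'

    # The offset from which this partition will be retried after the pause
    Karafka.logger.warn("Consumption failed, will retry from offset #{event[:seek_offset]}")
  end
end

Karafka.monitor.subscribe(ErrorsListener.new)
```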
data/lib/karafka/connection/listener.rb CHANGED
@@ -46,8 +46,8 @@ module Karafka
       Karafka.monitor.instrument(
         'connection.listener.before_fetch_loop',
         caller: self,
-        subscription_group: @subscription_group,
-        client: @client
+        client: @client,
+        subscription_group: @subscription_group
       )
 
       fetch_loop
@@ -67,7 +67,6 @@
       @stopped = true
       @executors.clear
       @coordinators.reset
-      @client.commit_offsets!
       @client.stop
     end
   end
@@ -87,7 +86,8 @@
       Karafka.monitor.instrument(
         'connection.listener.fetch_loop',
         caller: self,
-        client: @client
+        client: @client,
+        subscription_group: @subscription_group
       )
 
       resume_paused_partitions
@@ -95,6 +95,8 @@
       Karafka.monitor.instrument(
        'connection.listener.fetch_loop.received',
         caller: self,
+        client: @client,
+        subscription_group: @subscription_group,
         messages_buffer: @messages_buffer
       ) do
         # We need to fetch data before we revoke lost partitions details as during the polling
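
Editor's note: after this alignment, every `connection.listener.*` event exposes both `:client` and `:subscription_group`. A minimal sketch of a block subscription relying on that (the log message itself is illustrative):

```ruby
Karafka.monitor.subscribe('connection.listener.fetch_loop') do |event|
  # Both keys are present on all listener events after this change
  Karafka.logger.debug(
    "Polling subscription group #{event[:subscription_group].id}"
  )
end
```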
data/lib/karafka/contracts/consumer_group.rb CHANGED
@@ -24,20 +24,6 @@ module Karafka
 
         [[%i[topics], :names_not_unique]]
       end
-
-      virtual do |data, errors|
-        next unless errors.empty?
-
-        fetched_errors = []
-
-        data.fetch(:topics).each do |topic|
-          ConsumerGroupTopic.new.call(topic).errors.each do |key, value|
-            fetched_errors << [[topic, key].flatten, value]
-          end
-        end
-
-        fetched_errors
-      end
     end
   end
 end
data/lib/karafka/contracts/{consumer_group_topic.rb → topic.rb} RENAMED
@@ -3,13 +3,13 @@
 module Karafka
   module Contracts
     # Consumer group topic validation rules.
-    class ConsumerGroupTopic < Base
+    class Topic < Base
       configure do |config|
         config.error_messages = YAML.safe_load(
           File.read(
             File.join(Karafka.gem_root, 'config', 'errors.yml')
           )
-        ).fetch('en').fetch('validations').fetch('consumer_group_topic')
+        ).fetch('en').fetch('validations').fetch('topic')
       end
 
       required(:consumer) { |val| !val.nil? }
@@ -19,7 +19,6 @@ module Karafka
       required(:max_messages) { |val| val.is_a?(Integer) && val >= 1 }
       required(:initial_offset) { |val| %w[earliest latest].include?(val) }
       required(:max_wait_time) { |val| val.is_a?(Integer) && val >= 10 }
-      required(:manual_offset_management) { |val| [true, false].include?(val) }
       required(:name) { |val| val.is_a?(String) && Contracts::TOPIC_REGEXP.match?(val) }
       required(:subscription_group) { |val| val.nil? || (val.is_a?(String) && !val.empty?) }
 
data/lib/karafka/errors.rb CHANGED
@@ -41,11 +41,13 @@ module Karafka
     # Raised when the license token is not valid
     InvalidLicenseTokenError = Class.new(BaseError)
 
-    # Used to instrument this error into the error notifications
-    # We do not raise it so we won't crash deployed systems
-    ExpiredLicenseTokenError = Class.new(BaseError)
+    # This should never happen. Please open an issue if it does.
+    InvalidCoordinatorStateError = Class.new(BaseError)
+
+    # This should never happen. Please open an issue if it does.
+    StrategyNotFoundError = Class.new(BaseError)
 
     # This should never happen. Please open an issue if it does.
-    InvalidCoordinatorState = Class.new(BaseError)
+    SkipMessageNotFoundError = Class.new(BaseError)
   end
 end
data/lib/karafka/instrumentation/logger_listener.rb CHANGED
@@ -14,6 +14,8 @@ module Karafka
        fatal
      ].freeze
 
+     private_constant :USED_LOG_LEVELS
+
      # Logs each messages fetching attempt
      #
      # @param event [Dry::Events::Event] event details including payload
@@ -85,13 +87,6 @@
        end
      end
 
-     # Logs info that we're initializing Karafka app.
-     #
-     # @param _event [Dry::Events::Event] event details including payload
-     def on_app_initializing(_event)
-       info 'Initializing Karafka framework'
-     end
-
      # Logs info that we're running Karafka app.
      #
      # @param _event [Dry::Events::Event] event details including payload
@@ -118,6 +113,19 @@
        info 'Stopped Karafka server'
      end
 
+     # Logs info when we have dispatched a message the the DLQ
+     #
+     # @param event [Dry::Events::Event] event details including payload
+     def on_dead_letter_queue_dispatched(event)
+       message = event[:message]
+       offset = message.offset
+       topic = event[:caller].topic.name
+       dlq_topic = event[:caller].topic.dead_letter_queue.topic
+       partition = message.partition
+
+       info "Dispatched message #{offset} from #{topic}/#{partition} to DQL topic: #{dlq_topic}"
+     end
+
      # There are many types of errors that can occur in many places, but we provide a single
      # handler for all of them to simplify error instrumentation.
      # @param event [Dry::Events::Event] event details including payload
@@ -133,6 +141,15 @@
        when 'consumer.revoked.error'
          error "Consumer on revoked failed due to an error: #{error}"
          error details
+       when 'consumer.before_enqueue.error'
+         error "Consumer before enqueue failed due to an error: #{error}"
+         error details
+       when 'consumer.before_consume.error'
+         error "Consumer before consume failed due to an error: #{error}"
+         error details
+       when 'consumer.after_consume.error'
+         error "Consumer after consume failed due to an error: #{error}"
+         error details
        when 'consumer.shutdown.error'
          error "Consumer on shutdown failed due to an error: #{error}"
          error details
@@ -142,9 +159,6 @@
        when 'connection.listener.fetch_loop.error'
          error "Listener fetch loop error: #{error}"
          error details
-       when 'licenser.expired'
-         error error
-         error details
        when 'runner.call.error'
          fatal "Runner crashed due to an error: #{error}"
          fatal details
@@ -154,7 +168,7 @@
          error "librdkafka internal error occurred: #{error}"
          error details
        # Those will only occur when retries in the client fail and when they did not stop after
-       # backoffs
+       # back-offs
        when 'connection.client.poll.error'
          error "Data polling error occurred: #{error}"
          error details
data/lib/karafka/instrumentation/notifications.rb CHANGED
@@ -32,6 +32,8 @@ module Karafka
       connection.listener.fetch_loop
       connection.listener.fetch_loop.received
 
+      dead_letter_queue.dispatched
+
       worker.process
       worker.processed
       worker.completed
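
Editor's note: with `dead_letter_queue.dispatched` registered on the notifications bus, applications can react to DLQ dispatches the same way the bundled `LoggerListener` does above. A minimal sketch; the payload keys (`:message`, `:caller`) mirror what `on_dead_letter_queue_dispatched` reads, and the metrics client is a hypothetical placeholder:

```ruby
Karafka.monitor.subscribe('dead_letter_queue.dispatched') do |event|
  message = event[:message]

  # Hypothetical metrics client - replace with your own instrumentation
  MyMetrics.increment(
    'karafka.dlq.dispatched',
    topic: event[:caller].topic.name,
    partition: message.partition
  )
end
```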