karafka 2.0.37 → 2.0.39

Files changed (116)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +34 -0
  6. data/Gemfile.lock +7 -7
  7. data/README.md +1 -1
  8. data/bin/integrations +1 -1
  9. data/config/locales/errors.yml +0 -7
  10. data/config/locales/pro_errors.yml +18 -0
  11. data/lib/karafka/active_job/consumer.rb +22 -7
  12. data/lib/karafka/admin.rb +46 -14
  13. data/lib/karafka/base_consumer.rb +35 -55
  14. data/lib/karafka/connection/listener.rb +15 -10
  15. data/lib/karafka/errors.rb +0 -3
  16. data/lib/karafka/instrumentation/logger_listener.rb +44 -3
  17. data/lib/karafka/instrumentation/notifications.rb +7 -0
  18. data/lib/karafka/pro/active_job/consumer.rb +10 -5
  19. data/lib/karafka/pro/processing/coordinator.rb +13 -4
  20. data/lib/karafka/pro/processing/filters/base.rb +61 -0
  21. data/lib/karafka/pro/processing/filters/delayer.rb +70 -0
  22. data/lib/karafka/pro/processing/filters/expirer.rb +51 -0
  23. data/lib/karafka/pro/processing/filters/throttler.rb +84 -0
  24. data/lib/karafka/pro/processing/filters_applier.rb +100 -0
  25. data/lib/karafka/pro/processing/jobs_builder.rb +7 -3
  26. data/lib/karafka/pro/processing/scheduler.rb +24 -7
  27. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +68 -0
  28. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +74 -0
  29. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +72 -0
  30. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +76 -0
  31. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +62 -0
  32. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +68 -0
  33. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +64 -0
  34. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +69 -0
  35. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +38 -0
  36. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +64 -0
  37. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +38 -0
  38. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +58 -0
  39. data/lib/karafka/pro/processing/strategies/{dlq_lrj_vp.rb → aj/lrj_mom.rb} +14 -13
  40. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +77 -0
  41. data/lib/karafka/pro/processing/strategies/aj/mom.rb +36 -0
  42. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +52 -0
  43. data/lib/karafka/pro/processing/strategies/dlq/default.rb +131 -0
  44. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +61 -0
  45. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +75 -0
  46. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +74 -0
  47. data/lib/karafka/pro/processing/strategies/{mom.rb → dlq/ftr_lrj_vp.rb} +16 -19
  48. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +73 -0
  49. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +39 -0
  50. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +63 -0
  51. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +66 -0
  52. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +38 -0
  53. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +67 -0
  54. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +39 -0
  55. data/lib/karafka/pro/processing/strategies/ftr/default.rb +104 -0
  56. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +40 -0
  57. data/lib/karafka/pro/processing/strategies/lrj/default.rb +85 -0
  58. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +69 -0
  59. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +67 -0
  60. data/lib/karafka/pro/processing/strategies/{vp.rb → lrj/ftr_vp.rb} +15 -13
  61. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +78 -0
  62. data/lib/karafka/pro/processing/strategies/{aj_lrj_mom.rb → lrj/vp.rb} +13 -12
  63. data/lib/karafka/pro/processing/strategies/mom/default.rb +46 -0
  64. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +53 -0
  65. data/lib/karafka/pro/processing/strategies/vp/default.rb +53 -0
  66. data/lib/karafka/pro/processing/{strategies/lrj_vp.rb → strategies.rb} +1 -13
  67. data/lib/karafka/pro/processing/strategy_selector.rb +44 -18
  68. data/lib/karafka/pro/{processing/strategies/aj_mom.rb → routing/features/delaying/config.rb} +7 -13
  69. data/lib/karafka/pro/routing/features/delaying/contract.rb +38 -0
  70. data/lib/karafka/pro/routing/features/delaying/topic.rb +59 -0
  71. data/lib/karafka/pro/routing/features/delaying.rb +29 -0
  72. data/lib/karafka/pro/routing/features/expiring/config.rb +27 -0
  73. data/lib/karafka/pro/routing/features/expiring/contract.rb +38 -0
  74. data/lib/karafka/pro/routing/features/expiring/topic.rb +59 -0
  75. data/lib/karafka/pro/routing/features/expiring.rb +27 -0
  76. data/lib/karafka/pro/routing/features/filtering/config.rb +40 -0
  77. data/lib/karafka/pro/routing/features/filtering/contract.rb +41 -0
  78. data/lib/karafka/pro/routing/features/filtering/topic.rb +51 -0
  79. data/lib/karafka/pro/routing/features/filtering.rb +27 -0
  80. data/lib/karafka/pro/routing/features/long_running_job/contract.rb +1 -1
  81. data/lib/karafka/pro/routing/features/throttling/config.rb +32 -0
  82. data/lib/karafka/pro/routing/features/throttling/contract.rb +41 -0
  83. data/lib/karafka/pro/routing/features/throttling/topic.rb +69 -0
  84. data/lib/karafka/pro/routing/features/throttling.rb +30 -0
  85. data/lib/karafka/processing/coordinator.rb +60 -30
  86. data/lib/karafka/processing/coordinators_buffer.rb +5 -1
  87. data/lib/karafka/processing/executor.rb +23 -16
  88. data/lib/karafka/processing/executors_buffer.rb +10 -26
  89. data/lib/karafka/processing/jobs/consume.rb +2 -4
  90. data/lib/karafka/processing/jobs/idle.rb +24 -0
  91. data/lib/karafka/processing/jobs_builder.rb +2 -3
  92. data/lib/karafka/processing/result.rb +5 -0
  93. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
  94. data/lib/karafka/processing/strategies/base.rb +5 -0
  95. data/lib/karafka/processing/strategies/default.rb +50 -0
  96. data/lib/karafka/processing/strategies/dlq.rb +13 -4
  97. data/lib/karafka/processing/strategies/dlq_mom.rb +8 -3
  98. data/lib/karafka/processing/strategy_selector.rb +27 -10
  99. data/lib/karafka/version.rb +1 -1
  100. data/renovate.json +6 -0
  101. data.tar.gz.sig +0 -0
  102. metadata +66 -22
  103. metadata.gz.sig +0 -0
  104. data/lib/karafka/pro/processing/strategies/aj_dlq_lrj_mom.rb +0 -42
  105. data/lib/karafka/pro/processing/strategies/aj_dlq_lrj_mom_vp.rb +0 -70
  106. data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +0 -62
  107. data/lib/karafka/pro/processing/strategies/aj_dlq_mom_vp.rb +0 -68
  108. data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +0 -75
  109. data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +0 -62
  110. data/lib/karafka/pro/processing/strategies/dlq.rb +0 -120
  111. data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +0 -65
  112. data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +0 -62
  113. data/lib/karafka/pro/processing/strategies/dlq_mom.rb +0 -62
  114. data/lib/karafka/pro/processing/strategies/dlq_vp.rb +0 -37
  115. data/lib/karafka/pro/processing/strategies/lrj.rb +0 -83
  116. data/lib/karafka/pro/processing/strategies/lrj_mom.rb +0 -73
data/lib/karafka/pro/routing/features/filtering/config.rb
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Filtering < Base
+           # Filtering feature configuration
+           Config = Struct.new(:factories, keyword_init: true) do
+             # @return [Boolean] is this feature in use. Are any filters defined
+             def active?
+               !factories.empty?
+             end
+
+             # @return [Array<Object>] array of filters applicable to a topic partition
+             def filters
+               factories.map(&:call)
+             end
+
+             # @return [Hash] this config hash
+             def to_h
+               super.merge(active: active?)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/filtering/contract.rb
@@ -0,0 +1,41 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Filtering < Base
+           # Contract to validate configuration of the filtering feature
+           class Contract < Contracts::Base
+             configure do |config|
+               config.error_messages = YAML.safe_load(
+                 File.read(
+                   File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
+                 )
+               ).fetch('en').fetch('validations').fetch('topic')
+             end
+
+             nested(:filtering) do
+               required(:active) { |val| [true, false].include?(val) }
+
+               required(:factories) do |val|
+                 val.is_a?(Array) && val.all? { |factory| factory.respond_to?(:call) }
+               end
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/filtering/topic.rb
@@ -0,0 +1,51 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Filtering < Base
+           # Filtering feature topic extensions
+           module Topic
+             # @param factory [#call, nil] Callable that can produce new filters instances per
+             # assigned topic partition. nil as default so this feature is disabled
+             def filter(factory = nil)
+               @filtering ||= Config.new(factories: [])
+               @filtering.factories << factory if factory
+               @filtering
+             end
+
+             # @param args [Array] Anything `#filter` accepts
+             # @return [Filtering::Config] alias to match the naming API for features
+             def filtering(*args)
+               filter(*args)
+             end
+
+             # @return [Boolean] is a given job throttled
+             def filtering?
+               filtering.active?
+             end
+
+             # @return [Hash] topic with all its native configuration options plus throttling
+             def to_h
+               super.merge(
+                 filtering: filtering.to_h
+               ).freeze
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/filtering.rb
@@ -0,0 +1,27 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         # Filtering provides a generic API allowing you to pre-filter messages before they are
+         # dispatched to jobs and processed.
+         #
+         # It allows for throttling, delayed jobs and other filtering implementations.
+         class Filtering < Base
+         end
+       end
+     end
+   end
+ end
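The new `#filter` routing API above expects factories that respond to `#call` and return a fresh filter instance per assigned topic partition. As a rough, non-authoritative sketch of how such a factory could be registered (class names like `EventsConsumer` and `MyCustomFilter` are placeholders; a real filter would be built on the new `Karafka::Pro::Processing::Filters::Base` listed in the changed files):

```ruby
# Illustrative only: EventsConsumer and MyCustomFilter are hypothetical names.
class KarafkaApp < Karafka::App
  routes.draw do
    topic :events do
      consumer EventsConsumer

      # The factory must respond to #call (see the contract above) and is invoked
      # to build a dedicated filter instance for each assigned topic partition.
      filter ->(*) { MyCustomFilter.new }
    end
  end
end
```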
data/lib/karafka/pro/routing/features/long_running_job/contract.rb
@@ -16,7 +16,7 @@ module Karafka
      module Routing
        module Features
          class LongRunningJob < Base
-           # Rules around manual offset management settings
+           # Rules around long-running job settings
            class Contract < Contracts::Base
              configure do |config|
                config.error_messages = YAML.safe_load(
data/lib/karafka/pro/routing/features/throttling/config.rb
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Throttling < Base
+           # Throttling feature configuration
+           Config = Struct.new(
+             :active,
+             :limit,
+             :interval,
+             keyword_init: true
+           ) do
+             alias_method :active?, :active
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/throttling/contract.rb
@@ -0,0 +1,41 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Throttling < Base
+           # Rules around throttling settings
+           class Contract < Contracts::Base
+             configure do |config|
+               config.error_messages = YAML.safe_load(
+                 File.read(
+                   File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
+                 )
+               ).fetch('en').fetch('validations').fetch('topic')
+             end
+
+             nested(:throttling) do
+               required(:active) { |val| [true, false].include?(val) }
+               required(:interval) { |val| val.is_a?(Integer) && val.positive? }
+               required(:limit) do |val|
+                 (val.is_a?(Integer) || val == Float::INFINITY) && val.positive?
+               end
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/throttling/topic.rb
@@ -0,0 +1,69 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         class Throttling < Base
+           # Topic throttling API extensions
+           module Topic
+             # @param limit [Integer] max messages to process in an time interval
+             # @param interval [Integer] time interval for processing
+             def throttling(
+               limit: Float::INFINITY,
+               interval: 60_000
+             )
+               # Those settings are used for validation
+               @throttling ||= begin
+                 config = Config.new(
+                   active: limit != Float::INFINITY,
+                   limit: limit,
+                   interval: interval
+                 )
+
+                 # If someone defined throttling setup, we need to create appropriate filter for it
+                 # and inject it via filtering feature
+                 if config.active?
+                   factory = ->(*) { Pro::Processing::Filters::Throttler.new(limit, interval) }
+                   filter(factory)
+                 end
+
+                 config
+               end
+             end
+
+             # Just an alias for nice API
+             #
+             # @param args [Array] Anything `#throttling` accepts
+             def throttle(**args)
+               throttling(**args)
+             end
+
+             # @return [Boolean] is a given job throttled
+             def throttling?
+               throttling.active?
+             end
+
+             # @return [Hash] topic with all its native configuration options plus throttling
+             def to_h
+               super.merge(
+                 throttling: throttling.to_h
+               ).freeze
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/karafka/pro/routing/features/throttling.rb
@@ -0,0 +1,30 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Routing
+       module Features
+         # Ability to throttle ingestion of data per topic partition
+         # Useful when we have fixed limit of things we can process in a given time period without
+         # getting into trouble. It can be used for example to:
+         # - make sure we do not insert things to DB too fast
+         # - make sure we do not dispatch HTTP requests to external resources too fast
+         #
+         # This feature is virtual. It materializes itself via the `Filtering` feature.
+         class Throttling < Base
+         end
+       end
+     end
+   end
+ end
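For a quick illustration of the `#throttle` alias defined in `throttling/topic.rb` above, a routing sketch could look as follows (topic and consumer names are hypothetical); internally this registers a `Filters::Throttler` through the Filtering feature, as the `#throttling` method shows:

```ruby
# Illustrative only: caps ingestion at 100 messages per 60 seconds per topic partition.
class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders do
      consumer OrdersConsumer

      # Equivalent to throttling(limit: 100, interval: 60_000)
      throttle(limit: 100, interval: 60_000)
    end
  end
end
```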
data/lib/karafka/processing/coordinator.rb
@@ -10,57 +10,65 @@ module Karafka
      # listener thread, but we go with thread-safe by default for all not to worry about potential
      # future mistakes.
      class Coordinator
-       # @return [Karafka::TimeTrackers::Pause]
-       attr_reader :pause_tracker
-
-       attr_reader :seek_offset
+       attr_reader :pause_tracker, :seek_offset, :topic, :partition

+       # @param topic [Karafka::Routing::Topic]
+       # @param partition [Integer]
        # @param pause_tracker [Karafka::TimeTrackers::Pause] pause tracker for given topic partition
-       def initialize(pause_tracker)
+       def initialize(topic, partition, pause_tracker)
+         @topic = topic
+         @partition = partition
          @pause_tracker = pause_tracker
          @revoked = false
          @consumptions = {}
          @running_jobs = 0
          @manual_pause = false
          @mutex = Mutex.new
+         @marked = false
+         @failure = false
        end

        # Starts the coordinator for given consumption jobs
        # @param messages [Array<Karafka::Messages::Message>] batch of message for which we are
        # going to coordinate work. Not used with regular coordinator.
        def start(messages)
-         @mutex.synchronize do
-           @running_jobs = 0
-           # We need to clear the consumption results hash here, otherwise we could end up storing
-           # consumption results of consumer instances we no longer control
-           @consumptions.clear
-
-           # When starting to run, no pause is expected and no manual pause as well
-           @manual_pause = false
-
-           # We set it on the first encounter and never again, because then the offset setting
-           # should be up to the consumers logic (our or the end user)
-           # Seek offset needs to be always initialized as for case where manual offset management
-           # is turned on, we need to have reference to the first offset even in case of running
-           # multiple batches without marking any messages as consumed. Rollback needs to happen to
-           # the last place we know of or the last message + 1 that was marked
-           @seek_offset ||= messages.first.offset
-         end
+         @failure = false
+         @running_jobs = 0
+         # We need to clear the consumption results hash here, otherwise we could end up storing
+         # consumption results of consumer instances we no longer control
+         @consumptions.clear
+
+         # When starting to run, no pause is expected and no manual pause as well
+         @manual_pause = false
+
+         # We set it on the first encounter and never again, because then the offset setting
+         # should be up to the consumers logic (our or the end user)
+         # Seek offset needs to be always initialized as for case where manual offset management
+         # is turned on, we need to have reference to the first offset even in case of running
+         # multiple batches without marking any messages as consumed. Rollback needs to happen to
+         # the last place we know of or the last message + 1 that was marked
+         #
+         # It is however worth keeping in mind, that this may need to be used with `#marked?` to
+         # make sure that the first offset is an offset that has been marked.
+         @seek_offset ||= messages.first.offset
        end

        # @param offset [Integer] message offset
        def seek_offset=(offset)
-         @mutex.synchronize { @seek_offset = offset }
+         synchronize do
+           @marked = true
+           @seek_offset = offset
+         end
        end

        # Increases number of jobs that we handle with this coordinator
        def increment
-         @mutex.synchronize { @running_jobs += 1 }
+         synchronize { @running_jobs += 1 }
        end

        # Decrements number of jobs we handle at the moment
        def decrement
-         @mutex.synchronize do
+         synchronize do
            @running_jobs -= 1

            return @running_jobs unless @running_jobs.negative?
@@ -72,8 +80,10 @@ module Karafka
        end

        # Is all the consumption done and finished successfully for this coordinator
+       # We do not say we're successful until all work is done, because running work may still
+       # crash.
        def success?
-         @mutex.synchronize do
+         synchronize do
            @running_jobs.zero? && @consumptions.values.all?(&:success?)
          end
        end
@@ -81,7 +91,7 @@ module Karafka
        # Mark given consumption on consumer as successful
        # @param consumer [Karafka::BaseConsumer] consumer that finished successfully
        def success!(consumer)
-         @mutex.synchronize do
+         synchronize do
            consumption(consumer).success!
          end
        end
@@ -90,11 +100,17 @@ module Karafka
        # @param consumer [Karafka::BaseConsumer] consumer that failed
        # @param error [StandardError] error that occurred
        def failure!(consumer, error)
-         @mutex.synchronize do
+         synchronize do
+           @failure = true
            consumption(consumer).failure!(error)
          end
        end

+       # @return [Boolean] true if any of work we were running failed
+       def failure?
+         @failure
+       end
+
        # Marks given coordinator for processing group as revoked
        #
        # This is invoked in two places:
@@ -105,7 +121,7 @@ module Karafka
        # listener loop dispatching the revocation job. It is ok, as effectively nothing will be
        # processed until revocation jobs are done.
        def revoke
-         @mutex.synchronize { @revoked = true }
+         synchronize { @revoked = true }
        end

        # @return [Boolean] is the partition we are processing revoked or not
@@ -113,10 +129,18 @@ module Karafka
          @revoked
        end

+       # @return [Boolean] was the new seek offset assigned at least once. This is needed because
+       # by default we assign seek offset of a first message ever, however this is insufficient
+       # for DLQ in a scenario where the first message would be broken. We would never move
+       # out of it and would end up in an endless loop.
+       def marked?
+         @marked
+       end
+
        # Store in the coordinator info, that this pause was done manually by the end user and not
        # by the system itself
        def manual_pause
-         @mutex.synchronize { @manual_pause = true }
+         @manual_pause = true
        end

        # @return [Boolean] are we in a pause that was initiated by the user
@@ -124,6 +148,12 @@ module Karafka
          @pause_tracker.paused? && @manual_pause
        end

+       # Allows to run synchronized (locked) code that can operate in between virtual partitions
+       # @param block [Proc] code we want to run in the synchronized mode
+       def synchronize(&block)
+         @mutex.synchronize(&block)
+       end
+
        private

        # @param consumer [Object] karafka consumer (normal or pro)
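To make the new `Coordinator#synchronize` API above concrete, here is a hedged sketch of a consumer using it to guard a non-thread-safe, per-partition aggregate while virtual partitions run in parallel. `PartitionStats` is a hypothetical class; the consumer's `coordinator` accessor is implied by the executor changes further down. Note the lock is scoped to one topic partition, so it only serializes the virtual partitions of that partition.

```ruby
# Illustrative only: PartitionStats is an assumed, non-thread-safe aggregate.
# Only the coordinator API (synchronize, topic, partition) comes from this release.
class EventsConsumer < Karafka::BaseConsumer
  def consume
    # Virtual partitions of one topic partition run in separate worker threads but
    # share a single coordinator, so this lock serializes their updates without
    # blocking consumers of other topic partitions.
    coordinator.synchronize do
      PartitionStats.for(coordinator.topic.name, coordinator.partition).add(messages.count)
    end
  end
end
```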
data/lib/karafka/processing/coordinators_buffer.rb
@@ -9,16 +9,20 @@ module Karafka
      # @note This buffer operates only from the listener loop, thus we do not have to make it
      # thread-safe.
      class CoordinatorsBuffer
-       def initialize
+       # @param topics [Karafka::Routing::Topics]
+       def initialize(topics)
          @pauses_manager = Connection::PausesManager.new
          @coordinator_class = ::Karafka::App.config.internal.processing.coordinator_class
          @coordinators = Hash.new { |h, k| h[k] = {} }
+         @topics = topics
        end

        # @param topic [String] topic name
        # @param partition [Integer] partition number
        def find_or_create(topic, partition)
          @coordinators[topic][partition] ||= @coordinator_class.new(
+           @topics.find(topic),
+           partition,
            @pauses_manager.fetch(topic, partition)
          )
        end
data/lib/karafka/processing/executor.rb
@@ -12,6 +12,10 @@ module Karafka
      # @note Executors are not removed after partition is revoked. They are not that big and will
      # be re-used in case of a re-claim
      class Executor
+       extend Forwardable
+
+       def_delegators :@coordinator, :topic, :partition
+
        # @return [String] unique id that we use to ensure, that we use for state tracking
        attr_reader :id

@@ -21,20 +25,17 @@ module Karafka
        # @return [Karafka::Messages::Messages] messages batch
        attr_reader :messages

-       # Topic accessibility may be needed for the jobs builder to be able to build a proper job
-       # based on the topic settings defined by the end user
-       #
-       # @return [Karafka::Routing::Topic] topic of this executor
-       attr_reader :topic
+       # @return [Karafka::Processing::Coordinator] coordinator for this executor
+       attr_reader :coordinator

        # @param group_id [String] id of the subscription group to which the executor belongs
        # @param client [Karafka::Connection::Client] kafka client
-       # @param topic [Karafka::Routing::Topic] topic for which this executor will run
-       def initialize(group_id, client, topic)
+       # @param coordinator [Karafka::Processing::Coordinator]
+       def initialize(group_id, client, coordinator)
          @id = SecureRandom.hex(6)
          @group_id = group_id
          @client = client
-         @topic = topic
+         @coordinator = coordinator
        end
        # Allows us to prepare the consumer in the listener thread prior to the job being send to
@@ -42,8 +43,7 @@ module Karafka
        # queue as it could cause starvation.
        #
        # @param messages [Array<Karafka::Messages::Message>]
-       # @param coordinator [Karafka::Processing::Coordinator] coordinator for processing management
-       def before_enqueue(messages, coordinator)
+       def before_enqueue(messages)
          # the moment we've received the batch or actually the moment we've enqueued it,
          # but good enough
          @enqueued_at = Time.now
@@ -54,12 +54,10 @@ module Karafka
          # middle state, where re-creation of a consumer instance would occur only sometimes
          @consumer = nil unless ::Karafka::App.config.consumer_persistence

-         consumer.coordinator = coordinator
-
          # First we build messages batch...
          consumer.messages = Messages::Builders::Messages.call(
            messages,
-           @topic,
+           coordinator.topic,
            @enqueued_at
          )

@@ -82,6 +80,13 @@ module Karafka
          consumer.on_after_consume
        end

+       # Runs consumer idle operations
+       # This may include house-keeping or other state management changes that can occur but that
+       # not mean there are any new messages available for the end user to process
+       def idle
+         consumer.on_idle
+       end
+
        # Runs the controller `#revoked` method that should be triggered when a given consumer is
        # no longer needed due to partitions reassignment.
        #
@@ -114,15 +119,17 @@ module Karafka
        # @return [Object] cached consumer instance
        def consumer
          @consumer ||= begin
-           strategy = ::Karafka::App.config.internal.processing.strategy_selector.find(@topic)
+           topic = @coordinator.topic
+
+           strategy = ::Karafka::App.config.internal.processing.strategy_selector.find(topic)

-           consumer = @topic.consumer_class.new
+           consumer = topic.consumer_class.new
            # We use singleton class as the same consumer class may be used to process different
            # topics with different settings
            consumer.singleton_class.include(strategy)
-           consumer.topic = @topic
            consumer.client = @client
            consumer.producer = ::Karafka::App.producer
+           consumer.coordinator = @coordinator

            consumer
          end