karafka 2.5.5 → 2.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (215)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/LICENSE-COMM +4 -0
  4. data/README.md +2 -2
  5. data/certs/expired.txt +2 -0
  6. data/karafka.gemspec +23 -23
  7. data/lib/active_job/karafka.rb +2 -2
  8. data/lib/active_job/queue_adapters/karafka_adapter.rb +5 -5
  9. data/lib/karafka/active_job/consumer.rb +3 -3
  10. data/lib/karafka/active_job/current_attributes.rb +4 -4
  11. data/lib/karafka/active_job/job_options_contract.rb +2 -2
  12. data/lib/karafka/admin/acl.rb +3 -3
  13. data/lib/karafka/admin/configs/resource.rb +1 -1
  14. data/lib/karafka/admin/configs.rb +1 -1
  15. data/lib/karafka/admin/consumer_groups.rb +8 -8
  16. data/lib/karafka/admin/contracts/replication.rb +2 -2
  17. data/lib/karafka/admin/replication.rb +21 -21
  18. data/lib/karafka/admin/topics.rb +6 -6
  19. data/lib/karafka/admin.rb +4 -5
  20. data/lib/karafka/app.rb +3 -3
  21. data/lib/karafka/base_consumer.rb +34 -30
  22. data/lib/karafka/cli/base.rb +8 -8
  23. data/lib/karafka/cli/console.rb +1 -1
  24. data/lib/karafka/cli/contracts/server.rb +12 -12
  25. data/lib/karafka/cli/help.rb +2 -2
  26. data/lib/karafka/cli/info.rb +4 -4
  27. data/lib/karafka/cli/install.rb +11 -11
  28. data/lib/karafka/cli/server.rb +6 -6
  29. data/lib/karafka/cli/swarm.rb +1 -1
  30. data/lib/karafka/cli/topics/align.rb +4 -4
  31. data/lib/karafka/cli/topics/base.rb +5 -5
  32. data/lib/karafka/cli/topics/create.rb +2 -2
  33. data/lib/karafka/cli/topics/delete.rb +2 -2
  34. data/lib/karafka/cli/topics/help.rb +5 -1
  35. data/lib/karafka/cli/topics/plan.rb +16 -16
  36. data/lib/karafka/cli/topics/repartition.rb +3 -3
  37. data/lib/karafka/cli/topics.rb +22 -22
  38. data/lib/karafka/cli.rb +2 -2
  39. data/lib/karafka/connection/client.rb +17 -17
  40. data/lib/karafka/connection/listener.rb +6 -6
  41. data/lib/karafka/connection/mode.rb +1 -1
  42. data/lib/karafka/connection/proxy.rb +1 -1
  43. data/lib/karafka/connection/status.rb +2 -2
  44. data/lib/karafka/constraints.rb +3 -3
  45. data/lib/karafka/embedded.rb +3 -3
  46. data/lib/karafka/env.rb +4 -4
  47. data/lib/karafka/errors.rb +6 -1
  48. data/lib/karafka/execution_mode.rb +1 -1
  49. data/lib/karafka/helpers/config_importer.rb +2 -2
  50. data/lib/karafka/helpers/interval_runner.rb +4 -2
  51. data/lib/karafka/helpers/multi_delegator.rb +1 -1
  52. data/lib/karafka/instrumentation/assignments_tracker.rb +9 -9
  53. data/lib/karafka/instrumentation/callbacks/error.rb +5 -5
  54. data/lib/karafka/instrumentation/callbacks/oauthbearer_token_refresh.rb +4 -4
  55. data/lib/karafka/instrumentation/callbacks/rebalance.rb +6 -6
  56. data/lib/karafka/instrumentation/callbacks/statistics.rb +5 -5
  57. data/lib/karafka/instrumentation/logger.rb +7 -7
  58. data/lib/karafka/instrumentation/logger_listener.rb +76 -63
  59. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
  60. data/lib/karafka/instrumentation/vendors/appsignal/client.rb +1 -1
  61. data/lib/karafka/instrumentation/vendors/appsignal/errors_listener.rb +1 -1
  62. data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +36 -36
  63. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +33 -28
  64. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +38 -38
  65. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +5 -5
  66. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +1 -1
  67. data/lib/karafka/instrumentation/vendors/kubernetes/swarm_liveness_listener.rb +1 -1
  68. data/lib/karafka/licenser.rb +115 -8
  69. data/lib/karafka/messages/builders/batch_metadata.rb +4 -2
  70. data/lib/karafka/messages/messages.rb +1 -1
  71. data/lib/karafka/patches/rdkafka/bindings.rb +2 -2
  72. data/lib/karafka/pro/active_job/job_options_contract.rb +2 -2
  73. data/lib/karafka/pro/cleaner/messages/messages.rb +10 -0
  74. data/lib/karafka/pro/cli/contracts/server.rb +12 -12
  75. data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -4
  76. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +5 -5
  77. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +3 -3
  78. data/lib/karafka/pro/cli/parallel_segments.rb +7 -7
  79. data/lib/karafka/pro/cli/topics/health.rb +162 -0
  80. data/lib/karafka/pro/cli/topics.rb +52 -0
  81. data/lib/karafka/pro/connection/manager.rb +14 -14
  82. data/lib/karafka/pro/encryption/contracts/config.rb +2 -2
  83. data/lib/karafka/pro/encryption/messages/middleware.rb +2 -2
  84. data/lib/karafka/pro/encryption/messages/parser.rb +2 -2
  85. data/lib/karafka/pro/encryption/setup/config.rb +2 -2
  86. data/lib/karafka/pro/iterator/tpl_builder.rb +2 -2
  87. data/lib/karafka/pro/iterator.rb +1 -1
  88. data/lib/karafka/pro/loader.rb +2 -1
  89. data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +1 -1
  90. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +24 -14
  91. data/lib/karafka/pro/processing/filters/base.rb +1 -1
  92. data/lib/karafka/pro/processing/filters/delayer.rb +2 -2
  93. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
  94. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +1 -1
  95. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +6 -6
  96. data/lib/karafka/pro/processing/partitioner.rb +3 -3
  97. data/lib/karafka/pro/processing/periodic_job/consumer.rb +6 -5
  98. data/lib/karafka/pro/processing/piping/consumer.rb +7 -7
  99. data/lib/karafka/pro/processing/schedulers/base.rb +5 -5
  100. data/lib/karafka/pro/processing/schedulers/default.rb +5 -5
  101. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +6 -3
  102. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +6 -3
  103. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +6 -3
  104. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +2 -2
  105. data/lib/karafka/pro/processing/strategies/default.rb +22 -22
  106. data/lib/karafka/pro/processing/strategies/dlq/default.rb +7 -7
  107. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +6 -3
  108. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +6 -3
  109. data/lib/karafka/pro/processing/strategies/ftr/default.rb +2 -2
  110. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -2
  111. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +6 -3
  112. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +6 -3
  113. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +2 -2
  114. data/lib/karafka/pro/recurring_tasks/consumer.rb +2 -2
  115. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
  116. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +2 -2
  117. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +2 -2
  118. data/lib/karafka/pro/recurring_tasks/listener.rb +1 -1
  119. data/lib/karafka/pro/recurring_tasks/matcher.rb +2 -2
  120. data/lib/karafka/pro/recurring_tasks/serializer.rb +5 -5
  121. data/lib/karafka/pro/recurring_tasks/setup/config.rb +3 -3
  122. data/lib/karafka/pro/recurring_tasks/task.rb +4 -4
  123. data/lib/karafka/pro/recurring_tasks.rb +4 -4
  124. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +2 -2
  125. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +2 -2
  126. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +1 -1
  127. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +2 -2
  128. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +2 -2
  129. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +2 -2
  130. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +1 -1
  131. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +2 -2
  132. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +2 -2
  133. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +2 -2
  134. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +2 -2
  135. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +1 -1
  136. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +2 -2
  137. data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
  138. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +1 -1
  139. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +2 -2
  140. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +1 -1
  141. data/lib/karafka/pro/routing/features/offset_metadata.rb +1 -1
  142. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +5 -5
  143. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +2 -2
  144. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +2 -2
  145. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +3 -3
  146. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +2 -2
  147. data/lib/karafka/pro/routing/features/patterns/topic.rb +1 -1
  148. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +2 -2
  149. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +2 -2
  150. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
  151. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +7 -7
  152. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +2 -2
  153. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +13 -13
  154. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +2 -2
  155. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +2 -2
  156. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +2 -2
  157. data/lib/karafka/pro/routing/features/swarm.rb +1 -1
  158. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +2 -2
  159. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +7 -7
  160. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +2 -2
  161. data/lib/karafka/pro/scheduled_messages/consumer.rb +4 -4
  162. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
  163. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +10 -10
  164. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +2 -2
  165. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +4 -4
  166. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +5 -5
  167. data/lib/karafka/pro/scheduled_messages/proxy.rb +8 -8
  168. data/lib/karafka/pro/scheduled_messages/schema_validator.rb +1 -1
  169. data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
  170. data/lib/karafka/pro/scheduled_messages/state.rb +1 -1
  171. data/lib/karafka/pro/scheduled_messages/tracker.rb +2 -2
  172. data/lib/karafka/pro/scheduled_messages.rb +2 -2
  173. data/lib/karafka/pro/swarm/liveness_listener.rb +2 -2
  174. data/lib/karafka/process.rb +1 -1
  175. data/lib/karafka/processing/coordinator.rb +1 -1
  176. data/lib/karafka/processing/inline_insights/consumer.rb +4 -4
  177. data/lib/karafka/processing/inline_insights/tracker.rb +6 -6
  178. data/lib/karafka/processing/jobs/base.rb +6 -4
  179. data/lib/karafka/processing/jobs_queue.rb +10 -0
  180. data/lib/karafka/processing/schedulers/default.rb +4 -4
  181. data/lib/karafka/processing/strategies/base.rb +6 -6
  182. data/lib/karafka/processing/strategies/default.rb +13 -13
  183. data/lib/karafka/processing/strategies/dlq.rb +1 -1
  184. data/lib/karafka/processing/worker.rb +5 -5
  185. data/lib/karafka/railtie.rb +11 -11
  186. data/lib/karafka/routing/builder.rb +3 -3
  187. data/lib/karafka/routing/contracts/consumer_group.rb +6 -6
  188. data/lib/karafka/routing/contracts/routing.rb +2 -2
  189. data/lib/karafka/routing/contracts/topic.rb +4 -4
  190. data/lib/karafka/routing/features/active_job/contracts/topic.rb +3 -3
  191. data/lib/karafka/routing/features/base/expander.rb +4 -4
  192. data/lib/karafka/routing/features/base.rb +8 -8
  193. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +2 -2
  194. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +2 -2
  195. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +2 -2
  196. data/lib/karafka/routing/features/eofed/contracts/topic.rb +3 -3
  197. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +2 -2
  198. data/lib/karafka/routing/features/inline_insights.rb +7 -7
  199. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +2 -2
  200. data/lib/karafka/routing/subscription_group.rb +9 -9
  201. data/lib/karafka/runner.rb +3 -3
  202. data/lib/karafka/server.rb +14 -5
  203. data/lib/karafka/setup/attributes_map.rb +7 -7
  204. data/lib/karafka/setup/config.rb +11 -11
  205. data/lib/karafka/setup/contracts/config.rb +2 -2
  206. data/lib/karafka/setup/defaults_injector.rb +11 -11
  207. data/lib/karafka/swarm/manager.rb +6 -6
  208. data/lib/karafka/swarm/node.rb +8 -37
  209. data/lib/karafka/swarm/producer_replacer.rb +110 -0
  210. data/lib/karafka/swarm/supervisor.rb +9 -6
  211. data/lib/karafka/swarm.rb +1 -1
  212. data/lib/karafka/time_trackers/pause.rb +1 -1
  213. data/lib/karafka/version.rb +1 -1
  214. data/lib/karafka.rb +36 -36
  215. metadata +7 -3
@@ -56,16 +56,16 @@ module Karafka
56
56
  # user flow but should mitigate damages by not virtualizing
57
57
  begin
58
58
  groupings = vps.distributor.call(messages)
59
- rescue StandardError => e
59
+ rescue => e
60
60
  # This should not happen. If you are seeing this it means your partitioner code
61
61
  # failed and raised an error. We highly recommend mitigating partitioner level errors
62
62
  # on the user side because this type of collapse should be considered a last resort
63
63
  Karafka.monitor.instrument(
64
- 'error.occurred',
64
+ "error.occurred",
65
65
  caller: self,
66
66
  error: e,
67
67
  messages: messages,
68
- type: 'virtual_partitions.partitioner.error'
68
+ type: "virtual_partitions.partitioner.error"
69
69
  )
70
70
 
71
71
  groupings = { 0 => messages }
@@ -40,12 +40,13 @@ module Karafka
40
40
  #
41
41
  # We need to check both cases (public and private) since user is not expected to
42
42
  # have this method public
43
- return if consumer_singleton_class.instance_methods.include?(:tick)
43
+ return if consumer_singleton_class.method_defined?(:tick)
44
44
  return if consumer_singleton_class.private_instance_methods.include?(:tick)
45
45
 
46
46
  # Create empty ticking method
47
47
  consumer_singleton_class.class_eval do
48
- def tick; end
48
+ def tick
49
+ end
49
50
  end
50
51
  end
51
52
  end
@@ -61,12 +62,12 @@ module Karafka
61
62
  # @private
62
63
  def on_tick
63
64
  handle_tick
64
- rescue StandardError => e
65
+ rescue => e
65
66
  Karafka.monitor.instrument(
66
- 'error.occurred',
67
+ "error.occurred",
67
68
  error: e,
68
69
  caller: self,
69
- type: 'consumer.tick.error'
70
+ type: "consumer.tick.error"
70
71
  )
71
72
  end
72
73
  end
@@ -107,20 +107,20 @@ module Karafka
107
107
  topic: topic,
108
108
  payload: message.raw_payload,
109
109
  headers: message.raw_headers.merge(
110
- 'source_topic' => message.topic,
111
- 'source_partition' => message.partition.to_s,
112
- 'source_offset' => message.offset.to_s,
113
- 'source_consumer_group' => self.topic.consumer_group.id
110
+ "source_topic" => message.topic,
111
+ "source_partition" => message.partition.to_s,
112
+ "source_offset" => message.offset.to_s,
113
+ "source_consumer_group" => self.topic.consumer_group.id
114
114
  )
115
115
  }
116
116
 
117
117
  # Use a key only if key was provided
118
- if message.raw_key
119
- pipe_message[:key] = message.raw_key
118
+ pipe_message[:key] = if message.raw_key
119
+ message.raw_key
120
120
  # Otherwise pipe creating a key that will assign it based on the source partition
121
121
  # number
122
122
  else
123
- pipe_message[:key] = message.partition.to_s
123
+ message.partition.to_s
124
124
  end
125
125
 
126
126
  # Optional method user can define in consumer to enhance the dlq message hash with
@@ -65,7 +65,7 @@ module Karafka
65
65
  # [Array<Karafka::Processing::Jobs::Consume, Processing::Jobs::ConsumeNonBlocking>]
66
66
  # jobs for scheduling
67
67
  def schedule_consumption(_jobs_array)
68
- raise NotImplementedError, 'Implement in a subclass'
68
+ raise NotImplementedError, "Implement in a subclass"
69
69
  end
70
70
 
71
71
  # Runs the revocation jobs scheduling flow under a mutex
@@ -109,10 +109,10 @@ module Karafka
109
109
  end
110
110
 
111
111
  # Schedule by default all except consumption as fifo
112
- alias schedule_revocation schedule_fifo
113
- alias schedule_shutdown schedule_fifo
114
- alias schedule_idle schedule_fifo
115
- alias schedule_periodic schedule_fifo
112
+ alias_method :schedule_revocation, :schedule_fifo
113
+ alias_method :schedule_shutdown, :schedule_fifo
114
+ alias_method :schedule_idle, :schedule_fifo
115
+ alias_method :schedule_periodic, :schedule_fifo
116
116
 
117
117
  # Runs the manage tick under mutex
118
118
  def on_manage
@@ -71,11 +71,11 @@ module Karafka
71
71
  end
72
72
 
73
73
  # By default all non-consumption work is scheduled in a fifo order
74
- alias on_schedule_revocation schedule_fifo
75
- alias on_schedule_shutdown schedule_fifo
76
- alias on_schedule_idle schedule_fifo
77
- alias on_schedule_periodic schedule_fifo
78
- alias on_schedule_eofed schedule_fifo
74
+ alias_method :on_schedule_revocation, :schedule_fifo
75
+ alias_method :on_schedule_shutdown, :schedule_fifo
76
+ alias_method :on_schedule_idle, :schedule_fifo
77
+ alias_method :on_schedule_periodic, :schedule_fifo
78
+ alias_method :on_schedule_eofed, :schedule_fifo
79
79
 
80
80
  # This scheduler does not have anything to manage as it is a pass through and has no
81
81
  # state
@@ -52,14 +52,17 @@ module Karafka
52
52
 
53
53
  if coordinator.filtered? && !revoked?
54
54
  handle_post_filtering
55
+
56
+ # :seek and :pause are fully handled by handle_post_filtering
57
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
58
+ return unless coordinator.filter.action == :skip
55
59
  elsif !revoked?
56
60
  # no need to check for manual seek because AJ consumer is internal and
57
61
  # fully controlled by us
58
62
  seek(seek_offset, false, reset_offset: false)
59
- resume
60
- else
61
- resume
62
63
  end
64
+
65
+ resume
63
66
  else
64
67
  apply_dlq_flow do
65
68
  skippable_message, = find_skippable_message
@@ -58,14 +58,17 @@ module Karafka
58
58
 
59
59
  if coordinator.filtered? && !revoked?
60
60
  handle_post_filtering
61
+
62
+ # :seek and :pause are fully handled by handle_post_filtering
63
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
64
+ return unless coordinator.filter.action == :skip
61
65
  elsif !revoked?
62
66
  # no need to check for manual seek because AJ consumer is internal and
63
67
  # fully controlled by us
64
68
  seek(seek_offset, false, reset_offset: false)
65
- resume
66
- else
67
- resume
68
69
  end
70
+
71
+ resume
69
72
  else
70
73
  apply_dlq_flow do
71
74
  skippable_message, = find_skippable_message
@@ -54,14 +54,17 @@ module Karafka
54
54
 
55
55
  if coordinator.filtered? && !revoked?
56
56
  handle_post_filtering
57
+
58
+ # :seek and :pause are fully handled by handle_post_filtering
59
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
60
+ return unless coordinator.filter.action == :skip
57
61
  elsif !revoked?
58
62
  # no need to check for manual seek because AJ consumer is internal and
59
63
  # fully controlled by us
60
64
  seek(seek_offset, false, reset_offset: false)
61
- resume
62
- else
63
- resume
64
65
  end
66
+
67
+ resume
65
68
  else
66
69
  retry_after_pause
67
70
  end
@@ -78,8 +78,8 @@ module Karafka
78
78
  coordinator.revoke
79
79
  end
80
80
 
81
- monitor.instrument('consumer.revoke', caller: self)
82
- monitor.instrument('consumer.revoked', caller: self) do
81
+ monitor.instrument("consumer.revoke", caller: self)
82
+ monitor.instrument("consumer.revoked", caller: self) do
83
83
  revoked
84
84
  end
85
85
  ensure
@@ -82,12 +82,12 @@ module Karafka
82
82
  # If we are not inside a transaction but this is a transactional topic, we mark with
83
83
  # artificially created transaction
84
84
  stored = if producer.transactional?
85
- mark_with_transaction(message, offset_metadata, true)
86
- elsif @_transactional_marking
87
- raise Errors::NonTransactionalMarkingAttemptError
88
- else
89
- client.mark_as_consumed(message, offset_metadata)
90
- end
85
+ mark_with_transaction(message, offset_metadata, true)
86
+ elsif @_transactional_marking
87
+ raise Errors::NonTransactionalMarkingAttemptError
88
+ else
89
+ client.mark_as_consumed(message, offset_metadata)
90
+ end
91
91
 
92
92
  return revoked? unless stored
93
93
 
@@ -124,12 +124,12 @@ module Karafka
124
124
  # If we are not inside a transaction but this is a transactional topic, we mark with
125
125
  # artificially created transaction
126
126
  stored = if producer.transactional?
127
- mark_with_transaction(message, offset_metadata, false)
128
- elsif @_transactional_marking
129
- raise Errors::NonTransactionalMarkingAttemptError
130
- else
131
- client.mark_as_consumed!(message, offset_metadata)
132
- end
127
+ mark_with_transaction(message, offset_metadata, false)
128
+ elsif @_transactional_marking
129
+ raise Errors::NonTransactionalMarkingAttemptError
130
+ else
131
+ client.mark_as_consumed!(message, offset_metadata)
132
+ end
133
133
 
134
134
  return revoked? unless stored
135
135
 
@@ -165,7 +165,7 @@ module Karafka
165
165
  default_producer = nil
166
166
  transaction_started = nil
167
167
 
168
- monitor.instrument('consumer.consuming.transaction', caller: self) do
168
+ monitor.instrument("consumer.consuming.transaction", caller: self) do
169
169
  default_producer = producer
170
170
  self.producer = active_producer
171
171
 
@@ -311,7 +311,7 @@ module Karafka
311
311
 
312
312
  # No actions needed for the standard flow here
313
313
  def handle_before_schedule_consume
314
- monitor.instrument('consumer.before_schedule_consume', caller: self)
314
+ monitor.instrument("consumer.before_schedule_consume", caller: self)
315
315
 
316
316
  nil
317
317
  end
@@ -330,15 +330,15 @@ module Karafka
330
330
  # This can happen primarily when an LRJ job gets to the internal worker queue and
331
331
  # this partition is revoked prior processing.
332
332
  unless revoked?
333
- monitor.instrument('consumer.consume', caller: self)
334
- monitor.instrument('consumer.consumed', caller: self) do
333
+ monitor.instrument("consumer.consume", caller: self)
334
+ monitor.instrument("consumer.consumed", caller: self) do
335
335
  consume
336
336
  end
337
337
  end
338
338
 
339
339
  # Mark job as successful
340
340
  coordinator.success!(self)
341
- rescue StandardError => e
341
+ rescue => e
342
342
  # If failed, mark as failed
343
343
  coordinator.failure!(self, e)
344
344
 
@@ -377,8 +377,8 @@ module Karafka
377
377
  coordinator.revoke
378
378
  end
379
379
 
380
- monitor.instrument('consumer.revoke', caller: self)
381
- monitor.instrument('consumer.revoked', caller: self) do
380
+ monitor.instrument("consumer.revoke", caller: self)
381
+ monitor.instrument("consumer.revoked", caller: self) do
382
382
  revoked
383
383
  end
384
384
  ensure
@@ -387,15 +387,15 @@ module Karafka
387
387
 
388
388
  # No action needed for the tick standard flow
389
389
  def handle_before_schedule_tick
390
- monitor.instrument('consumer.before_schedule_tick', caller: self)
390
+ monitor.instrument("consumer.before_schedule_tick", caller: self)
391
391
 
392
392
  nil
393
393
  end
394
394
 
395
395
  # Runs the consumer `#tick` method with reporting
396
396
  def handle_tick
397
- monitor.instrument('consumer.tick', caller: self)
398
- monitor.instrument('consumer.ticked', caller: self) do
397
+ monitor.instrument("consumer.tick", caller: self)
398
+ monitor.instrument("consumer.ticked", caller: self) do
399
399
  tick
400
400
  end
401
401
  ensure
@@ -129,7 +129,7 @@ module Karafka
129
129
 
130
130
  # Notify about dispatch on the events bus
131
131
  monitor.instrument(
132
- 'dead_letter_queue.dispatched',
132
+ "dead_letter_queue.dispatched",
133
133
  caller: self,
134
134
  message: skippable_message
135
135
  )
@@ -170,12 +170,12 @@ module Karafka
170
170
  partition_key: source_partition,
171
171
  payload: skippable_message.raw_payload,
172
172
  headers: skippable_message.raw_headers.merge(
173
- 'source_topic' => topic.name,
174
- 'source_partition' => source_partition,
175
- 'source_offset' => skippable_message.offset.to_s,
176
- 'source_consumer_group' => topic.consumer_group.id,
177
- 'source_attempts' => attempt.to_s,
178
- 'source_trace_id' => errors_tracker.trace_id
173
+ "source_topic" => topic.name,
174
+ "source_partition" => source_partition,
175
+ "source_offset" => skippable_message.offset.to_s,
176
+ "source_consumer_group" => topic.consumer_group.id,
177
+ "source_attempts" => attempt.to_s,
178
+ "source_trace_id" => errors_tracker.trace_id
179
179
  )
180
180
  }
181
181
 
@@ -56,12 +56,15 @@ module Karafka
56
56
 
57
57
  if coordinator.filtered? && !revoked?
58
58
  handle_post_filtering
59
+
60
+ # :seek and :pause are fully handled by handle_post_filtering
61
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
62
+ return unless coordinator.filter.action == :skip
59
63
  elsif !revoked? && !coordinator.manual_seek?
60
64
  seek(seek_offset, false, reset_offset: false)
61
- resume
62
- else
63
- resume
64
65
  end
66
+
67
+ resume
65
68
  else
66
69
  apply_dlq_flow do
67
70
  return resume if revoked?
@@ -51,12 +51,15 @@ module Karafka
51
51
 
52
52
  if coordinator.filtered? && !revoked?
53
53
  handle_post_filtering
54
+
55
+ # :seek and :pause are fully handled by handle_post_filtering
56
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
57
+ return unless coordinator.filter.action == :skip
54
58
  elsif !revoked? && !coordinator.manual_seek?
55
59
  seek(last_group_message.offset + 1, false, reset_offset: false)
56
- resume
57
- else
58
- resume
59
60
  end
61
+
62
+ resume
60
63
  else
61
64
  apply_dlq_flow do
62
65
  return resume if revoked?
@@ -99,7 +99,7 @@ module Karafka
99
99
  throttle_message = filter.cursor
100
100
 
101
101
  monitor.instrument(
102
- 'filtering.seek',
102
+ "filtering.seek",
103
103
  caller: self,
104
104
  message: throttle_message
105
105
  ) do
@@ -114,7 +114,7 @@ module Karafka
114
114
  throttle_message = filter.cursor
115
115
 
116
116
  monitor.instrument(
117
- 'filtering.throttled',
117
+ "filtering.throttled",
118
118
  caller: self,
119
119
  message: throttle_message,
120
120
  timeout: throttle_timeout
@@ -84,8 +84,8 @@ module Karafka
84
84
  coordinator.revoke
85
85
  end
86
86
 
87
- monitor.instrument('consumer.revoke', caller: self)
88
- monitor.instrument('consumer.revoked', caller: self) do
87
+ monitor.instrument("consumer.revoke", caller: self)
88
+ monitor.instrument("consumer.revoked", caller: self) do
89
89
  revoked
90
90
  end
91
91
  ensure
@@ -54,14 +54,17 @@ module Karafka
54
54
  # If still not revoked and was throttled, we need to apply throttling logic
55
55
  if coordinator.filtered? && !revoked?
56
56
  handle_post_filtering
57
+
58
+ # :seek and :pause are fully handled by handle_post_filtering
59
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
60
+ return unless coordinator.filter.action == :skip
57
61
  elsif !revoked? && !coordinator.manual_seek?
58
62
  # If not revoked and not throttled, we move to where we were suppose to and
59
63
  # resume
60
64
  seek(seek_offset, false, reset_offset: false)
61
- resume
62
- else
63
- resume
64
65
  end
66
+
67
+ resume
65
68
  else
66
69
  # If processing failed, we need to pause
67
70
  # For long running job this will overwrite the default never-ending pause and
@@ -52,14 +52,17 @@ module Karafka
52
52
  # If still not revoked and was throttled, we need to apply filtering logic
53
53
  if coordinator.filtered? && !revoked?
54
54
  handle_post_filtering
55
+
56
+ # :seek and :pause are fully handled by handle_post_filtering
57
+ # For :skip we still need to resume the LRJ MAX_PAUSE_TIME pause
58
+ return unless coordinator.filter.action == :skip
55
59
  elsif !revoked? && !coordinator.manual_seek?
56
60
  # If not revoked and not throttled, we move to where we were suppose to and
57
61
  # resume
58
62
  seek(last_group_message.offset + 1, false, reset_offset: false)
59
- resume
60
- else
61
- resume
62
63
  end
64
+
65
+ resume
63
66
  else
64
67
  # If processing failed, we need to pause
65
68
  # For long running job this will overwrite the default never-ending pause and
@@ -73,8 +73,8 @@ module Karafka
73
73
  coordinator.revoke
74
74
  end
75
75
 
76
- monitor.instrument('consumer.revoke', caller: self)
77
- monitor.instrument('consumer.revoked', caller: self) do
76
+ monitor.instrument("consumer.revoke", caller: self)
77
+ monitor.instrument("consumer.revoked", caller: self) do
78
78
  revoked
79
79
  end
80
80
  ensure
@@ -47,7 +47,7 @@ module Karafka
47
47
  type = payload[:type]
48
48
 
49
49
  case type
50
- when 'schedule'
50
+ when "schedule"
51
51
  # If we're replaying data, we need to record the most recent stored state, so we
52
52
  # can use this data to fully initialize the scheduler
53
53
  @executor.update_state(payload) if @executor.replaying?
@@ -62,7 +62,7 @@ module Karafka
62
62
  partition,
63
63
  message.offset - 1
64
64
  )
65
- when 'command'
65
+ when "command"
66
66
  @executor.apply_command(payload)
67
67
 
68
68
  next if @executor.replaying?
@@ -29,8 +29,8 @@ module Karafka
29
29
  class Config < Karafka::Contracts::Base
30
30
  configure do |config|
31
31
  config.error_messages = YAML.safe_load_file(
32
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
33
- ).fetch('en').fetch('validations').fetch('setup').fetch('config')
32
+ File.join(Karafka.gem_root, "config", "locales", "pro_errors.yml")
33
+ ).fetch("en").fetch("validations").fetch("setup").fetch("config")
34
34
  end
35
35
 
36
36
  nested(:recurring_tasks) do
@@ -29,8 +29,8 @@ module Karafka
29
29
  class Task < Karafka::Contracts::Base
30
30
  configure do |config|
31
31
  config.error_messages = YAML.safe_load_file(
32
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
33
- ).fetch('en').fetch('validations').fetch('recurring_tasks')
32
+ File.join(Karafka.gem_root, "config", "locales", "pro_errors.yml")
33
+ ).fetch("en").fetch("validations").fetch("recurring_tasks")
34
34
  end
35
35
 
36
36
  # Regexp to ensure all tasks ids are URL safe
@@ -34,7 +34,7 @@ module Karafka
34
34
  def schedule
35
35
  produce(
36
36
  topics.schedules.name,
37
- 'state:schedule',
37
+ "state:schedule",
38
38
  serializer.schedule(::Karafka::Pro::RecurringTasks.schedule)
39
39
  )
40
40
  end
@@ -87,7 +87,7 @@ module Karafka
87
87
  key: key,
88
88
  partition: 0,
89
89
  payload: payload,
90
- headers: { 'zlib' => 'true' }
90
+ headers: { "zlib" => "true" }
91
91
  )
92
92
  end
93
93
  end
@@ -37,7 +37,7 @@ module Karafka
37
37
  # are independent. It is not to replace the Web UI tracking but to just log failed
38
38
  # executions in the same way as successful but just with the failure as an outcome.
39
39
  def on_error_occurred(event)
40
- return unless event[:type] == 'recurring_tasks.task.execute.error'
40
+ return unless event[:type] == "recurring_tasks.task.execute.error"
41
41
 
42
42
  Dispatcher.log(event)
43
43
  end
@@ -30,10 +30,10 @@ module Karafka
30
30
  # @return [Boolean] is this message dedicated to current process and is actionable
31
31
  def matches?(task, payload)
32
32
  # We only match commands
33
- return false unless payload[:type] == 'command'
33
+ return false unless payload[:type] == "command"
34
34
 
35
35
  # * is a wildcard to match all for batch commands
36
- return false unless payload[:task][:id] == '*' || payload[:task][:id] == task.id
36
+ return false unless payload[:task][:id] == "*" || payload[:task][:id] == task.id
37
37
 
38
38
  # Ignore messages that have different schema. This can happen in the middle of
39
39
  # upgrades of the framework. We ignore this not to risk compatibility issues
@@ -26,7 +26,7 @@ module Karafka
26
26
  # Converts schedule command and log details into data we can dispatch to Kafka.
27
27
  class Serializer
28
28
  # Current recurring tasks related schema structure
29
- SCHEMA_VERSION = '1.0'
29
+ SCHEMA_VERSION = "1.0"
30
30
 
31
31
  # Serializes and compresses the schedule with all its tasks and their execution state
32
32
  # @param schedule [Karafka::Pro::RecurringTasks::Schedule]
@@ -49,7 +49,7 @@ module Karafka
49
49
  schema_version: SCHEMA_VERSION,
50
50
  schedule_version: schedule.version,
51
51
  dispatched_at: Time.now.to_f,
52
- type: 'schedule',
52
+ type: "schedule",
53
53
  tasks: tasks
54
54
  }
55
55
 
@@ -66,7 +66,7 @@ module Karafka
66
66
  schema_version: SCHEMA_VERSION,
67
67
  schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
68
68
  dispatched_at: Time.now.to_f,
69
- type: 'command',
69
+ type: "command",
70
70
  command: {
71
71
  name: command_name
72
72
  },
@@ -89,11 +89,11 @@ module Karafka
89
89
  schema_version: SCHEMA_VERSION,
90
90
  schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
91
91
  dispatched_at: Time.now.to_f,
92
- type: 'log',
92
+ type: "log",
93
93
  task: {
94
94
  id: task.id,
95
95
  time_taken: event.payload[:time] || -1,
96
- result: event.payload.key?(:error) ? 'failure' : 'success'
96
+ result: event.payload.key?(:error) ? "failure" : "success"
97
97
  }
98
98
  }
99
99
 
@@ -31,7 +31,7 @@ module Karafka
31
31
 
32
32
  setting(:consumer_class, default: Consumer)
33
33
  setting(:deserializer, default: Deserializer.new)
34
- setting(:group_id, default: 'karafka_recurring_tasks')
34
+ setting(:group_id, default: "karafka_recurring_tasks")
35
35
  # By default we will run the scheduling every 15 seconds since we provide a minute-based
36
36
  # precision
37
37
  setting(:interval, default: 15_000)
@@ -50,11 +50,11 @@ module Karafka
50
50
 
51
51
  setting(:topics) do
52
52
  setting(:schedules) do
53
- setting(:name, default: 'karafka_recurring_tasks_schedules')
53
+ setting(:name, default: "karafka_recurring_tasks_schedules")
54
54
  end
55
55
 
56
56
  setting(:logs) do
57
- setting(:name, default: 'karafka_recurring_tasks_logs')
57
+ setting(:name, default: "karafka_recurring_tasks_logs")
58
58
  end
59
59
  end
60
60
 
@@ -103,7 +103,7 @@ module Karafka
103
103
  # Executes the given task and publishes appropriate notification bus events.
104
104
  def call
105
105
  monitor.instrument(
106
- 'recurring_tasks.task.executed',
106
+ "recurring_tasks.task.executed",
107
107
  task: self
108
108
  ) do
109
109
  # We check for presence of the `@executable` because user can define cron schedule
@@ -112,13 +112,13 @@ module Karafka
112
112
 
113
113
  execute
114
114
  end
115
- rescue StandardError => e
115
+ rescue => e
116
116
  monitor.instrument(
117
- 'error.occurred',
117
+ "error.occurred",
118
118
  caller: self,
119
119
  error: e,
120
120
  task: self,
121
- type: 'recurring_tasks.task.execute.error'
121
+ type: "recurring_tasks.task.execute.error"
122
122
  )
123
123
  ensure
124
124
  @trigger = false