sidekiq 7.1.6 → 7.3.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +4 -4
  2. data/Changes.md +184 -0
  3. data/README.md +3 -3
  4. data/bin/multi_queue_bench +271 -0
  5. data/bin/sidekiqload +21 -12
  6. data/lib/active_job/queue_adapters/sidekiq_adapter.rb +75 -0
  7. data/lib/generators/sidekiq/job_generator.rb +2 -0
  8. data/lib/sidekiq/api.rb +139 -44
  9. data/lib/sidekiq/capsule.rb +8 -3
  10. data/lib/sidekiq/cli.rb +4 -1
  11. data/lib/sidekiq/client.rb +21 -1
  12. data/lib/sidekiq/component.rb +22 -0
  13. data/lib/sidekiq/config.rb +31 -7
  14. data/lib/sidekiq/deploy.rb +4 -2
  15. data/lib/sidekiq/embedded.rb +2 -0
  16. data/lib/sidekiq/fetch.rb +1 -1
  17. data/lib/sidekiq/iterable_job.rb +55 -0
  18. data/lib/sidekiq/job/interrupt_handler.rb +24 -0
  19. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
  20. data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
  21. data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
  22. data/lib/sidekiq/job/iterable.rb +294 -0
  23. data/lib/sidekiq/job.rb +14 -3
  24. data/lib/sidekiq/job_logger.rb +7 -6
  25. data/lib/sidekiq/job_retry.rb +9 -4
  26. data/lib/sidekiq/job_util.rb +2 -0
  27. data/lib/sidekiq/launcher.rb +7 -5
  28. data/lib/sidekiq/logger.rb +1 -1
  29. data/lib/sidekiq/metrics/query.rb +6 -1
  30. data/lib/sidekiq/metrics/shared.rb +15 -4
  31. data/lib/sidekiq/metrics/tracking.rb +20 -8
  32. data/lib/sidekiq/middleware/current_attributes.rb +46 -13
  33. data/lib/sidekiq/middleware/modules.rb +2 -0
  34. data/lib/sidekiq/monitor.rb +2 -1
  35. data/lib/sidekiq/paginator.rb +8 -2
  36. data/lib/sidekiq/processor.rb +21 -11
  37. data/lib/sidekiq/rails.rb +19 -3
  38. data/lib/sidekiq/redis_client_adapter.rb +24 -5
  39. data/lib/sidekiq/redis_connection.rb +36 -8
  40. data/lib/sidekiq/ring_buffer.rb +2 -0
  41. data/lib/sidekiq/scheduled.rb +2 -2
  42. data/lib/sidekiq/systemd.rb +2 -0
  43. data/lib/sidekiq/testing.rb +14 -8
  44. data/lib/sidekiq/transaction_aware_client.rb +7 -0
  45. data/lib/sidekiq/version.rb +5 -1
  46. data/lib/sidekiq/web/action.rb +26 -4
  47. data/lib/sidekiq/web/application.rb +53 -64
  48. data/lib/sidekiq/web/csrf_protection.rb +8 -5
  49. data/lib/sidekiq/web/helpers.rb +73 -27
  50. data/lib/sidekiq/web/router.rb +5 -2
  51. data/lib/sidekiq/web.rb +54 -2
  52. data/lib/sidekiq.rb +5 -3
  53. data/sidekiq.gemspec +3 -2
  54. data/web/assets/javascripts/application.js +26 -0
  55. data/web/assets/javascripts/dashboard-charts.js +37 -11
  56. data/web/assets/javascripts/dashboard.js +14 -10
  57. data/web/assets/javascripts/metrics.js +34 -0
  58. data/web/assets/stylesheets/application-rtl.css +10 -0
  59. data/web/assets/stylesheets/application.css +38 -3
  60. data/web/locales/en.yml +3 -1
  61. data/web/locales/fr.yml +0 -1
  62. data/web/locales/gd.yml +0 -1
  63. data/web/locales/it.yml +32 -1
  64. data/web/locales/ja.yml +0 -1
  65. data/web/locales/pt-br.yml +1 -2
  66. data/web/locales/tr.yml +100 -0
  67. data/web/locales/uk.yml +24 -1
  68. data/web/locales/zh-cn.yml +0 -1
  69. data/web/locales/zh-tw.yml +0 -1
  70. data/web/views/_footer.erb +12 -1
  71. data/web/views/_metrics_period_select.erb +1 -1
  72. data/web/views/_summary.erb +7 -7
  73. data/web/views/busy.erb +7 -7
  74. data/web/views/dashboard.erb +29 -36
  75. data/web/views/filtering.erb +4 -5
  76. data/web/views/layout.erb +6 -6
  77. data/web/views/metrics.erb +38 -30
  78. data/web/views/metrics_for_job.erb +29 -38
  79. data/web/views/morgue.erb +2 -2
  80. data/web/views/queue.erb +1 -1
  81. data/web/views/queues.erb +6 -2
  82. metadata +33 -13
data/lib/sidekiq/api.rb CHANGED
@@ -4,7 +4,6 @@ require "sidekiq"
 
 require "zlib"
 require "set"
-require "base64"
 
 require "sidekiq/metrics/query"
 
@@ -374,7 +373,7 @@ module Sidekiq
     def display_class
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @klass ||= self["display_class"] || begin
-        if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+        if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper"
           job_class = @item["wrapped"] || args[0]
           if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob"
             # MailerClass#mailer_method
@@ -390,7 +389,7 @@ module Sidekiq
 
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
-      @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+      @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper"
         job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : []
         if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
@@ -491,8 +490,8 @@ module Sidekiq
     end
 
     def uncompress_backtrace(backtrace)
-      decoded = Base64.decode64(backtrace)
-      uncompressed = Zlib::Inflate.inflate(decoded)
+      strict_base64_decoded = backtrace.unpack1("m")
+      uncompressed = Zlib::Inflate.inflate(strict_base64_decoded)
       Sidekiq.load_json(uncompressed)
     end
   end
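Dropping `require "base64"` is safe here because `String#unpack1("m")` performs the same decoding as `Base64.decode64` without loading the library. A quick illustrative check (not part of the gem):

    require "zlib"

    compressed = Zlib::Deflate.deflate(%({"error":"boom"}))
    encoded = [compressed].pack("m")   # what Base64.encode64 does under the hood

    decoded = encoded.unpack1("m")     # equivalent to Base64.decode64(encoded)
    Zlib::Inflate.inflate(decoded)     # => "{\"error\":\"boom\"}"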
@@ -669,6 +668,41 @@ module Sidekiq
       end
     end
 
+    def pop_each
+      Sidekiq.redis do |c|
+        size.times do
+          data, score = c.zpopmin(name, 1)&.first
+          break unless data
+          yield data, score
+        end
+      end
+    end
+
+    def retry_all
+      c = Sidekiq::Client.new
+      pop_each do |msg, _|
+        job = Sidekiq.load_json(msg)
+        # Manual retries should not count against the retry limit.
+        job["retry_count"] -= 1 if job["retry_count"]
+        c.push(job)
+      end
+    end
+
+    # Move all jobs from this Set to the Dead Set.
+    # See DeadSet#kill
+    def kill_all(notify_failure: false, ex: nil)
+      ds = DeadSet.new
+      opts = {notify_failure: notify_failure, ex: ex, trim: false}
+
+      begin
+        pop_each do |msg, _|
+          ds.kill(msg, opts)
+        end
+      ensure
+        ds.trim
+      end
+    end
+
     def each
       initial_size = @_size
       offset_size = 0
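`pop_each`, `retry_all` and `kill_all` are now defined on `JobSet`, so the scheduled, retry and dead sets all share them. A hedged sketch of how the new primitives compose (the job class name is illustrative; `pop_each` removes entries, so anything the block does not push back is gone):

    rs = Sidekiq::RetrySet.new

    # Selectively re-enqueue: pop every entry and push back only what we want to keep.
    client = Sidekiq::Client.new
    rs.pop_each do |json, _score|
      job = Sidekiq.load_json(json)
      client.push(job) if job["class"] == "BillingJob"
    end

    # Or give up on the remainder, invoking death handlers with a custom error.
    rs.kill_all(notify_failure: true, ex: RuntimeError.new("abandoned during incident"))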
@@ -679,7 +713,7 @@ module Sidekiq
         range_start = page * page_size + offset_size
         range_end = range_start + page_size - 1
         elements = Sidekiq.redis { |conn|
-          conn.zrange name, range_start, range_end, withscores: true
+          conn.zrange name, range_start, range_end, "withscores"
         }
         break if elements.empty?
         page -= 1
@@ -706,7 +740,7 @@ module Sidekiq
       end
 
       elements = Sidekiq.redis { |conn|
-        conn.zrange(name, begin_score, end_score, "BYSCORE", withscores: true)
+        conn.zrange(name, begin_score, end_score, "BYSCORE", "withscores")
       }
 
       elements.each_with_object([]) do |element, result|
@@ -766,39 +800,21 @@ module Sidekiq
 
   ##
   # The set of scheduled jobs within Sidekiq.
-  # Based on this, you can search/filter for jobs. Here's an
-  # example where I'm selecting jobs based on some complex logic
-  # and deleting them from the scheduled set.
-  #
   # See the API wiki page for usage notes and examples.
   #
   class ScheduledSet < JobSet
     def initialize
-      super "schedule"
+      super("schedule")
     end
   end
 
   ##
   # The set of retries within Sidekiq.
-  # Based on this, you can search/filter for jobs. Here's an
-  # example where I'm selecting all jobs of a certain type
-  # and deleting them from the retry queue.
-  #
   # See the API wiki page for usage notes and examples.
   #
   class RetrySet < JobSet
     def initialize
-      super "retry"
-    end
-
-    # Enqueues all jobs pending within the retry set.
-    def retry_all
-      each(&:retry) while size > 0
-    end
-
-    # Kills all jobs pending within the retry set.
-    def kill_all
-      each(&:kill) while size > 0
+      super("retry")
     end
   end
 
@@ -809,36 +825,48 @@ module Sidekiq
   #
   class DeadSet < JobSet
     def initialize
-      super "dead"
+      super("dead")
+    end
+
+    # Trim dead jobs which are over our storage limits
+    def trim
+      hash = Sidekiq.default_configuration
+      now = Time.now.to_f
+      Sidekiq.redis do |conn|
+        conn.multi do |transaction|
+          transaction.zremrangebyscore(name, "-inf", now - hash[:dead_timeout_in_seconds])
+          transaction.zremrangebyrank(name, 0, - hash[:dead_max_jobs])
+        end
+      end
     end
 
     # Add the given job to the Dead set.
     # @param message [String] the job data as JSON
+    # @option opts [Boolean] :notify_failure (true) Whether death handlers should be called
+    # @option opts [Boolean] :trim (true) Whether Sidekiq should trim the structure to keep it within configuration
+    # @option opts [Exception] :ex (RuntimeError) An exception to pass to the death handlers
     def kill(message, opts = {})
       now = Time.now.to_f
       Sidekiq.redis do |conn|
-        conn.multi do |transaction|
-          transaction.zadd(name, now.to_s, message)
-          transaction.zremrangebyscore(name, "-inf", now - Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds])
-          transaction.zremrangebyrank(name, 0, - Sidekiq::Config::DEFAULTS[:dead_max_jobs])
-        end
+        conn.zadd(name, now.to_s, message)
       end
 
+      trim if opts[:trim] != false
+
       if opts[:notify_failure] != false
         job = Sidekiq.load_json(message)
-        r = RuntimeError.new("Job killed by API")
-        r.set_backtrace(caller)
+        if opts[:ex]
+          ex = opts[:ex]
+        else
+          ex = RuntimeError.new("Job killed by API")
+          ex.set_backtrace(caller)
+        end
         Sidekiq.default_configuration.death_handlers.each do |handle|
-          handle.call(job, r)
+          handle.call(job, ex)
         end
       end
       true
     end
-
-    # Enqueue all dead jobs
-    def retry_all
-      each(&:retry) while size > 0
-    end
   end
 
   ##
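The new `kill` options let callers supply their own exception for the death handlers and postpone trimming when adding many entries; `trim` then enforces `:dead_max_jobs` / `:dead_timeout_in_seconds` once at the end. A rough sketch (payload contents are illustrative):

    require "securerandom"

    ds = Sidekiq::DeadSet.new
    payload = Sidekiq.dump_json("class" => "HardJob", "args" => [1], "jid" => SecureRandom.hex(12))

    # Record the job as dead, run death handlers with our own exception,
    # and skip the per-call trim because we trim once below.
    ds.kill(payload, ex: StandardError.new("killed manually"), notify_failure: true, trim: false)

    ds.trim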
@@ -881,7 +909,7 @@ module Sidekiq
     # @api private
     def cleanup
       # dont run cleanup more than once per minute
-      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") }
 
       count = 0
       Sidekiq.redis do |conn|
@@ -1110,11 +1138,11 @@ module Sidekiq
 
       procs.zip(all_works).each do |key, workers|
         workers.each_pair do |tid, json|
-          results << [key, tid, Sidekiq.load_json(json)] unless json.empty?
+          results << [key, tid, Sidekiq::Work.new(key, tid, Sidekiq.load_json(json))] unless json.empty?
         end
       end
 
-      results.sort_by { |(_, _, hsh)| hsh["run_at"] }.each(&block)
+      results.sort_by { |(_, _, hsh)| hsh.raw("run_at") }.each(&block)
     end
 
     # Note that #size is only as accurate as Sidekiq's heartbeat,
@@ -1137,7 +1165,74 @@ module Sidekiq
         end
       end
     end
+
+    ##
+    # Find the work which represents a job with the given JID.
+    # *This is a slow O(n) operation*. Do not use for app logic.
+    #
+    # @param jid [String] the job identifier
+    # @return [Sidekiq::Work] the work or nil
+    def find_work_by_jid(jid)
+      each do |_process_id, _thread_id, work|
+        job = work.job
+        return work if job.jid == jid
+      end
+      nil
+    end
+  end
+
+  # Sidekiq::Work represents a job which is currently executing.
+  class Work
+    attr_reader :process_id
+    attr_reader :thread_id
+
+    def initialize(pid, tid, hsh)
+      @process_id = pid
+      @thread_id = tid
+      @hsh = hsh
+      @job = nil
+    end
+
+    def queue
+      @hsh["queue"]
+    end
+
+    def run_at
+      Time.at(@hsh["run_at"])
+    end
+
+    def job
+      @job ||= Sidekiq::JobRecord.new(@hsh["payload"])
+    end
+
+    def payload
+      @hsh["payload"]
+    end
+
+    # deprecated
+    def [](key)
+      kwargs = {uplevel: 1}
+      kwargs[:category] = :deprecated if RUBY_VERSION > "3.0" # TODO
+      warn("Direct access to `Sidekiq::Work` attributes is deprecated, please use `#payload`, `#queue`, `#run_at` or `#job` instead", **kwargs)
+
+      @hsh[key]
+    end
+
+    # :nodoc:
+    # @api private
+    def raw(name)
+      @hsh[name]
+    end
+
+    def method_missing(*all)
+      @hsh.send(*all)
+    end
+
+    def respond_to_missing?(name, *args)
+      @hsh.respond_to?(name)
+    end
   end
+
   # Since "worker" is a nebulous term, we've deprecated the use of this class name.
   # Is "worker" a process, a type of job, a thread? Undefined!
   # WorkSet better describes the data.
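With entries now wrapped in `Sidekiq::Work`, WorkSet iteration exposes structured readers instead of a bare hash (hash-style access still works but emits a deprecation warning). A hedged sketch; the JID is a placeholder:

    require "sidekiq/api"

    Sidekiq::WorkSet.new.each do |process_id, thread_id, work|
      puts "#{process_id}/#{thread_id} #{work.queue} #{work.job.display_class} since #{work.run_at}"
    end

    # O(n) scan over every busy thread - fine in a console, not in app logic.
    work = Sidekiq::WorkSet.new.find_work_by_jid("0123456789abcdef01234567")
    work&.job&.display_args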
data/lib/sidekiq/capsule.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/component"
 
 module Sidekiq
@@ -17,6 +19,7 @@ module Sidekiq
   # end
   class Capsule
     include Sidekiq::Component
+    extend Forwardable
 
     attr_reader :name
     attr_reader :queues
@@ -24,6 +27,8 @@ module Sidekiq
     attr_reader :mode
     attr_reader :weights
 
+    def_delegators :@config, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
+
     def initialize(name, config)
       @name = name
       @config = config
@@ -35,9 +40,9 @@ module Sidekiq
 
     def fetcher
       @fetcher ||= begin
-        inst = (config[:fetch_class] || Sidekiq::BasicFetch).new(self)
-        inst.setup(config[:fetch_setup]) if inst.respond_to?(:setup)
-        inst
+        instance = (config[:fetch_class] || Sidekiq::BasicFetch).new(self)
+        instance.setup(config[:fetch_setup]) if instance.respond_to?(:setup)
+        instance
       end
     end
 
data/lib/sidekiq/cli.rb CHANGED
@@ -38,7 +38,7 @@ module Sidekiq # :nodoc:
     # Code within this method is not tested because it alters
     # global process state irreversibly. PRs which improve the
     # test coverage of Sidekiq::CLI are welcomed.
-    def run(boot_app: true)
+    def run(boot_app: true, warmup: true)
       boot_application if boot_app
 
       if environment == "development" && $stdout.tty? && @config.logger.formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
@@ -101,6 +101,8 @@ module Sidekiq # :nodoc:
       # Touch middleware so it isn't lazy loaded by multiple threads, #3043
       @config.server_middleware
 
+      ::Process.warmup if warmup && ::Process.respond_to?(:warmup) && ENV["RUBY_DISABLE_WARMUP"] != "1"
+
       # Before this point, the process is initializing with just the main thread.
       # Starting here the process will now have multiple threads running.
       fire_event(:startup, reverse: false, reraise: true)
@@ -421,3 +423,4 @@ end
 
 require "sidekiq/systemd"
 require "sidekiq/metrics/tracking"
+require "sidekiq/job/interrupt_handler"
data/lib/sidekiq/client.rb CHANGED
@@ -58,6 +58,23 @@ module Sidekiq
       end
     end
 
+    # Cancel the IterableJob with the given JID.
+    # **NB: Cancellation is asynchronous.** Iteration checks every
+    # five seconds so this will not immediately stop the given job.
+    def cancel!(jid)
+      key = "it-#{jid}"
+      _, result, _ = Sidekiq.redis do |c|
+        c.pipelined do |p|
+          p.hsetnx(key, "cancelled", Time.now.to_i)
+          p.hget(key, "cancelled")
+          p.expire(key, Sidekiq::Job::Iterable::STATE_TTL)
+          # TODO When Redis 7.2 is required
+          # p.expire(key, Sidekiq::Job::Iterable::STATE_TTL, "nx")
+        end
+      end
+      result.to_i
+    end
+
     ##
     # The main method used to push a job to Redis. Accepts a number of options:
     #
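A hedged sketch of cancelling an in-flight iterable job (`ProcessUserSet` is the example class used in the IterableJob docs further down); the returned integer is the epoch timestamp stored in the job's `cancelled` field:

    jid = ProcessUserSet.perform_async(123)

    # Asynchronous: the running job notices the flag between iterations,
    # typically within about five seconds, and stops without re-enqueueing.
    Sidekiq::Client.new.cancel!(jid)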
@@ -248,9 +265,12 @@ module Sidekiq
     def atomic_push(conn, payloads)
       if payloads.first.key?("at")
         conn.zadd("schedule", payloads.flat_map { |hash|
-          at = hash.delete("at").to_s
+          at = hash["at"].to_s
           # ActiveJob sets this but the job has not been enqueued yet
           hash.delete("enqueued_at")
+          # TODO: Use hash.except("at") when support for Ruby 2.7 is dropped
+          hash = hash.dup
+          hash.delete("at")
           [at, Sidekiq.dump_json(hash)]
         })
       else
data/lib/sidekiq/component.rb CHANGED
@@ -64,5 +64,27 @@ module Sidekiq
       end
       arr.clear if oneshot # once we've fired an event, we never fire it again
     end
+
+    # When you have a large tree of components, the `inspect` output
+    # can get out of hand, especially with lots of Sidekiq::Config
+    # references everywhere. We avoid calling `inspect` on more complex
+    # state and use `to_s` instead to keep output manageable, #6553
+    def inspect
+      "#<#{self.class.name} #{
+        instance_variables.map do |name|
+          value = instance_variable_get(name)
+          case value
+          when Proc
+            "#{name}=#{value}"
+          when Sidekiq::Config
+            "#{name}=#{value}"
+          when Sidekiq::Component
+            "#{name}=#{value}"
+          else
+            "#{name}=#{value.inspect}"
+          end
+        end.join(", ")
+      }>"
+    end
   end
 end
data/lib/sidekiq/config.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "forwardable"
 
 require "set"
@@ -17,6 +19,10 @@ module Sidekiq
     poll_interval_average: nil,
     average_scheduled_poll_interval: 5,
     on_complex_arguments: :raise,
+    iteration: {
+      max_job_runtime: nil,
+      retry_backoff: 0
+    },
     error_handlers: [],
     death_handlers: [],
     lifecycle_events: {
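These defaults can be overridden like any other config key. A sketch for an initializer, assuming the values are in seconds and are consumed by the new iterable-job machinery in sidekiq/job/iterable.rb:

    Sidekiq.configure_server do |config|
      # Interrupt and re-enqueue long-running iterable jobs after ~30 minutes.
      config[:iteration] = {max_job_runtime: 30 * 60, retry_backoff: 0}
    end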
@@ -52,9 +58,15 @@ module Sidekiq
       @capsules = {}
     end
 
-    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
     attr_reader :capsules
 
+    def inspect
+      "#<#{self.class.name} @options=#{
+        @options.except(:lifecycle_events, :reloader, :death_handlers, :error_handlers).inspect
+      }>"
+    end
+
     def to_json(*)
       Sidekiq.dump_json(@options)
     end
@@ -179,7 +191,13 @@ module Sidekiq
 
     # register global singletons which can be accessed elsewhere
     def register(name, instance)
-      @directory[name] = instance
+      # logger.debug("register[#{name}] = #{instance}")
+      # Sidekiq Enterprise lazy registers a few services so we
+      # can't lock down this hash completely.
+      hash = @directory.dup
+      hash[name] = instance
+      @directory = hash.freeze
+      instance
     end
 
     # find a singleton
@@ -187,10 +205,16 @@ module Sidekiq
       # JNDI is just a fancy name for a hash lookup
       @directory.fetch(name) do |key|
         return nil unless default_class
-        @directory[key] = default_class.new(self)
+        register(key, default_class.new(self))
       end
     end
 
+    def freeze!
+      @directory.freeze
+      @options.freeze
+      true
+    end
+
     ##
     # Death handlers are called when all retries for a job have been exhausted and
     # the job dies. It's the notification to your application
@@ -258,9 +282,9 @@ module Sidekiq
       @logger = logger
     end
 
-    private def arity(handler)
-      return handler.arity if handler.is_a?(Proc)
-      handler.method(:call).arity
+    private def parameter_size(handler)
+      target = handler.is_a?(Proc) ? handler : handler.method(:call)
+      target.parameters.size
     end
 
     # INTERNAL USE ONLY
@@ -269,7 +293,7 @@ module Sidekiq
         p ["!!!!!", ex]
       end
       @options[:error_handlers].each do |handler|
-        if arity(handler) == 2
+        if parameter_size(handler) == 2
           # TODO Remove in 8.0
           logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
           handler.call(ex, {_config: self}.merge(ctx))
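Because two-argument handlers now trigger the deprecation notice above, new error handlers should take three parameters. A minimal sketch (the keys available in ctx vary by call site):

    Sidekiq.configure_server do |config|
      config.error_handlers << ->(ex, ctx, cfg) {
        cfg.logger.warn("#{ex.class}: #{ex.message} (#{ctx[:context]})")
      }
    end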
data/lib/sidekiq/deploy.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/redis_connection"
 require "time"
 
@@ -34,7 +36,7 @@ module Sidekiq
       # handle an very common error in marking deploys:
       # having every process mark its deploy, leading
       # to N marks for each deploy. Instead we round the time
-      # to the minute so that multple marks within that minute
+      # to the minute so that multiple marks within that minute
       # will all naturally rollup into one mark per minute.
       whence = at.utc
       floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
@@ -44,7 +46,7 @@ module Sidekiq
 
       @pool.with do |c|
         # only allow one deploy mark for a given label for the next minute
-        lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
+        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
         if lock
           c.multi do |pipe|
             pipe.hsetnx(key, stamp, label)
data/lib/sidekiq/embedded.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/component"
 require "sidekiq/launcher"
 require "sidekiq/metrics/tracking"
data/lib/sidekiq/fetch.rb CHANGED
@@ -44,7 +44,7 @@ module Sidekiq # :nodoc:
         return nil
       end
 
-      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
+      queue, job = redis { |conn| conn.blocking_call(TIMEOUT, "brpop", *qs, TIMEOUT) }
       UnitOfWork.new(queue, job, config) if queue
     end
 
data/lib/sidekiq/iterable_job.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require "sidekiq/job/iterable"
+
+# Iterable jobs are ones which provide a sequence to process using
+# `build_enumerator(*args, cursor: cursor)` and then process each
+# element of that sequence in `each_iteration(item, *args)`.
+#
+# The job is kicked off as normal:
+#
+#   ProcessUserSet.perform_async(123)
+#
+# but instead of calling `perform`, Sidekiq will call:
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor:nil)
+#
+# Your Enumerator must yield `(object, updated_cursor)` and
+# Sidekiq will call your `each_iteration` method:
+#
+#   ProcessUserSet#each_iteration(object, 123)
+#
+# After every iteration, Sidekiq will check for shutdown. If we are
+# stopping, the cursor will be saved to Redis and the job re-queued
+# to pick up the rest of the work upon restart. Your job will get
+# the updated_cursor so it can pick up right where it stopped.
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor: updated_cursor)
+#
+# The cursor object must be serializable to JSON.
+#
+# Note there are several APIs to help you build enumerators for
+# ActiveRecord Relations, CSV files, etc. See sidekiq/job/iterable/*.rb.
+module Sidekiq
+  module IterableJob
+    def self.included(base)
+      base.include Sidekiq::Job
+      base.include Sidekiq::Job::Iterable
+    end
+
+    # def build_enumerator(*args, cursor:)
+    # def each_iteration(item, *args)
+
+    # Your job can also define several callbacks during points
+    # in each job's lifecycle.
+    #
+    #   def on_start
+    #   def on_resume
+    #   def on_stop
+    #   def on_complete
+    #   def around_iteration
+    #
+    # To keep things simple and compatible, this is the same
+    # API as the `sidekiq-iteration` gem.
+  end
+end
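A hedged end-to-end sketch of the contract documented above; the class, model and helper usage are illustrative (`active_record_records_enumerator` is assumed to come from sidekiq/job/iterable/enumerators.rb, which ships in this release but is not shown in this excerpt):

    class ProcessUserSet
      include Sidekiq::IterableJob

      # Yields [item, cursor] pairs; the cursor is persisted if the job
      # is interrupted so iteration can resume where it stopped.
      def build_enumerator(segment_id, cursor:)
        active_record_records_enumerator(User.where(segment: segment_id), cursor: cursor)
      end

      def each_iteration(user, segment_id)
        user.recalculate_recommendations!
      end

      def on_complete
        Sidekiq.logger.info { "Finished iterating segment" }
      end
    end

    ProcessUserSet.perform_async(123)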
data/lib/sidekiq/job/interrupt_handler.rb ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    class InterruptHandler
+      include Sidekiq::ServerMiddleware
+
+      def call(instance, hash, queue)
+        yield
+      rescue Interrupted
+        logger.debug "Interrupted, re-queueing..."
+        c = Sidekiq::Client.new
+        c.push(hash)
+        raise Sidekiq::JobRetry::Skip
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Job::InterruptHandler
+  end
+end
data/lib/sidekiq/job/iterable/active_record_enumerator.rb ADDED
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    module Iterable
+      # @api private
+      class ActiveRecordEnumerator
+        def initialize(relation, cursor: nil, **options)
+          @relation = relation
+          @cursor = cursor
+          @options = options
+        end
+
+        def records
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_each(**@options, start: @cursor) do |record|
+              yielder.yield(record, record.id)
+            end
+          end
+        end
+
+        def batches
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_in_batches(**@options, start: @cursor) do |batch|
+              yielder.yield(batch, batch.first.id)
+            end
+          end
+        end
+
+        def relations
+          Enumerator.new(-> { relations_size }) do |yielder|
+            # Convenience to use :batch_size for all the
+            # ActiveRecord batching methods.
+            options = @options.dup
+            options[:of] ||= options.delete(:batch_size)
+
+            @relation.in_batches(**options, start: @cursor) do |relation|
+              first_record = relation.first
+              yielder.yield(relation, first_record.id)
+            end
+          end
+        end
+
+        private
+
+        def relations_size
+          batch_size = @options[:batch_size] || 1000
+          (@relation.count + batch_size - 1) / batch_size # ceiling division
+        end
+      end
+    end
+  end
+end
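This enumerator is normally reached through the `build_enumerator` helpers, but it can be exercised directly, which also shows the [item, cursor] shape each variant yields. A rough sketch with an illustrative model:

    relation = User.where(active: true)
    enum = Sidekiq::Job::Iterable::ActiveRecordEnumerator.new(relation, cursor: nil, batch_size: 500)

    enum.records.each do |record, cursor|
      # cursor is record.id here; batches/relations yield the first id of each chunk
    end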