sidekiq 6.5.1 → 6.5.2

Potentially problematic release: this version of sidekiq has been flagged as possibly problematic.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 1afbc6a1a0b14403e9148e746c08e0a2b24e634fca05288982c96719675607de
- data.tar.gz: 3ff3f8df76b565f42030462eb8d09b673b89751dcd6a0b7c41a255789960d321
+ metadata.gz: f2c97836bf831cdce9bbab92fbd7b0d01d3522c24f5cced0e0822a53b906d0af
+ data.tar.gz: 85c7f9abe6844a5471519ca235c65e84275bd2f8d33e395ec3f85bada091c699
  SHA512:
- metadata.gz: e8a68611735322d98cc517f1d03ef02394497f8eb505e0db496909cac1f6b7f0179f38ce1966546f717115395102c78fdad9ae0fd947360307b9288dcc22b369
- data.tar.gz: 163e41dfb153a4e2ec50d407bde08cb8a395909807053f24478a73af1a6a9858b5aec92cbf618d4d066560a2b7bedaa2f88b48f823609e2d78f21320270cb97e
+ metadata.gz: 06e0cad7074f7288c4ec66ee0f8e38e85c556390f61a23dc9dc025459a0da57ed0641ae89c450862c2101045d4220e5e11ad8b7a23f5316fbfe7fb12fc04ec67
+ data.tar.gz: 7362db4b038735acda91a86017053cc5fec9b70f1bbc8b0650abb592b60598c49a07fbecd3ec14b18a9f6b1cf6e3b2e0810b64e70e833072b64248a945f45646
data/Changes.md CHANGED
@@ -2,6 +2,17 @@
 
  [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/main/Ent-Changes.md)
 
+ 6.5.2
+ ----------
+
+ - [Job Metrics are under active development, help wanted!](https://github.com/mperham/sidekiq/wiki/Metrics#contributing) **BETA**
+ - Add `Context` column on queue page which shows any CurrentAttributes [#5450]
+ - `sidekiq_retry_in` may now return `:discard` or `:kill` to dynamically stop job retries [#5406]
+ - Smarter sorting of processes in /busy Web UI [#5398]
+ - Fix broken hamburger menu in mobile UI [#5428]
+ - Require redis-rb 4.5.0. Note that Sidekiq will break if you use the
+ [`Redis.exists_returns_integer = false`](https://github.com/redis/redis-rb/blob/master/CHANGELOG.md#450) flag. [#5394]
+
  6.5.1
  ----------
 
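As context for the `sidekiq_retry_in` change above ([#5406]), here is a minimal sketch of a job using the new return values; the job and error class names are hypothetical, not from this release:

```ruby
# Hypothetical job showing the 6.5.2 sidekiq_retry_in contract:
# an Integer schedules the next retry, :discard silently drops the job,
# and :kill sends it straight to the Dead set.
class SyncOrderJob
  include Sidekiq::Job

  sidekiq_retry_in do |count, exception|
    case exception
    when ActiveRecord::RecordNotFound
      :discard               # the record is gone; retrying will never succeed
    when FatalConfigError    # hypothetical error class
      :kill                  # needs a manual fix; park the job in the Dead set
    else
      10 * (count + 1)       # otherwise retry after a growing delay in seconds
    end
  end

  def perform(order_id)
    # ...
  end
end
```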
data/lib/sidekiq/api.rb CHANGED
@@ -3,9 +3,17 @@
  require "sidekiq"
 
  require "zlib"
+ require "set"
  require "base64"
+ require "sidekiq/metrics/deploy"
+ require "sidekiq/metrics/query"
 
  module Sidekiq
+ # Retrieve runtime statistics from Redis regarding
+ # this Sidekiq cluster.
+ #
+ # stat = Sidekiq::Stats.new
+ # stat.processed
  class Stats
  def initialize
  fetch_stats_fast!
@@ -52,6 +60,7 @@ module Sidekiq
  end
 
  # O(1) redis calls
+ # @api private
  def fetch_stats_fast!
  pipe1_res = Sidekiq.redis { |conn|
  conn.pipelined do |pipeline|
@@ -91,6 +100,7 @@ module Sidekiq
  end
 
  # O(number of processes + number of queues) redis calls
+ # @api private
  def fetch_stats_slow!
  processes = Sidekiq.redis { |conn|
  conn.sscan_each("processes").to_a
@@ -116,11 +126,13 @@ module Sidekiq
  @stats
  end
 
+ # @api private
  def fetch_stats!
  fetch_stats_fast!
  fetch_stats_slow!
  end
 
+ # @api private
  def reset(*stats)
  all = %w[failed processed]
  stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
@@ -202,9 +214,10 @@ module Sidekiq
  end
 
  ##
- # Encapsulates a queue within Sidekiq.
+ # Represents a queue within Sidekiq.
  # Allows enumeration of all jobs within the queue
- # and deletion of jobs.
+ # and deletion of jobs. NB: this queue data is real-time
+ # and is changing within Redis moment by moment.
  #
  # queue = Sidekiq::Queue.new("mailer")
  # queue.each do |job|
@@ -212,7 +225,6 @@ module Sidekiq
  # job.args # => [1, 2, 3]
  # job.delete if job.jid == 'abcdef1234567890'
  # end
- #
  class Queue
  include Enumerable
 
@@ -296,6 +308,7 @@ module Sidekiq
  end
 
  # delete all jobs within this queue
+ # @return [Boolean] true
  def clear
  Sidekiq.redis do |conn|
  conn.multi do |transaction|
@@ -303,34 +316,45 @@ module Sidekiq
  transaction.srem("queues", name)
  end
  end
+ true
  end
  alias_method :💣, :clear
 
- def as_json(options = nil) # :nodoc:
+ # :nodoc:
+ # @api private
+ def as_json(options = nil)
  {name: name} # 5336
  end
  end
 
  ##
- # Encapsulates a pending job within a Sidekiq queue or
- # sorted set.
+ # Represents a pending job within a Sidekiq queue.
  #
  # The job should be considered immutable but may be
  # removed from the queue via JobRecord#delete.
- #
  class JobRecord
+ # the parsed Hash of job data
+ # @!attribute [r] Item
  attr_reader :item
+ # the underlying String in Redis
+ # @!attribute [r] Value
  attr_reader :value
+ # the queue associated with this job
+ # @!attribute [r] Queue
  attr_reader :queue
 
- def initialize(item, queue_name = nil) # :nodoc:
+ # :nodoc:
+ # @api private
+ def initialize(item, queue_name = nil)
  @args = nil
  @value = item
  @item = item.is_a?(Hash) ? item : parse(item)
  @queue = queue_name || @item["queue"]
  end
 
- def parse(item) # :nodoc:
+ # :nodoc:
+ # @api private
+ def parse(item)
  Sidekiq.load_json(item)
  rescue JSON::ParserError
  # If the job payload in Redis is invalid JSON, we'll load
@@ -341,6 +365,8 @@ module Sidekiq
  {}
  end
 
+ # This is the job class which Sidekiq will execute. If using ActiveJob,
+ # this class will be the ActiveJob adapter class rather than a specific job.
  def klass
  self["class"]
  end
@@ -480,21 +506,27 @@ module Sidekiq
  end
 
  # Represents a job within a Redis sorted set where the score
- # represents a timestamp for the job.
+ # represents a timestamp associated with the job. This timestamp
+ # could be the scheduled time for it to run (e.g. scheduled set),
+ # or the expiration date after which the entry should be deleted (e.g. dead set).
  class SortedEntry < JobRecord
  attr_reader :score
  attr_reader :parent
 
- def initialize(parent, score, item) # :nodoc:
+ # :nodoc:
+ # @api private
+ def initialize(parent, score, item)
  super(item)
  @score = Float(score)
  @parent = parent
  end
 
+ # The timestamp associated with this entry
  def at
  Time.at(score).utc
  end
 
+ # remove this entry from the sorted set
  def delete
  if @value
  @parent.delete_by_value(@parent.name, @value)
@@ -505,7 +537,7 @@ module Sidekiq
 
  # Change the scheduled time for this job.
  #
- # @param [Time] the new timestamp when this job will be enqueued.
+ # @param at [Time] the new timestamp for this job
  def reschedule(at)
  Sidekiq.redis do |conn|
  conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item))
@@ -579,20 +611,32 @@ module Sidekiq
  end
  end
 
+ # Base class for all sorted sets within Sidekiq.
  class SortedSet
  include Enumerable
 
+ # Redis key of the set
+ # @!attribute [r] Name
  attr_reader :name
 
+ # :nodoc:
+ # @api private
  def initialize(name)
  @name = name
  @_size = size
  end
 
+ # real-time size of the set, will change
  def size
  Sidekiq.redis { |c| c.zcard(name) }
  end
 
+ # Scan through each element of the sorted set, yielding each to the supplied block.
+ # Please see Redis's <a href="https://redis.io/commands/scan/">SCAN documentation</a> for implementation details.
+ #
+ # @param match [String] a snippet or regexp to filter matches.
+ # @param count [Integer] number of elements to retrieve at a time, default 100
+ # @yieldparam [Sidekiq::SortedEntry] each entry
  def scan(match, count = 100)
  return to_enum(:scan, match, count) unless block_given?
 
@@ -604,22 +648,32 @@ module Sidekiq
  end
  end
 
+ # @return [Boolean] always true
  def clear
  Sidekiq.redis do |conn|
  conn.unlink(name)
  end
+ true
  end
  alias_method :💣, :clear
 
- def as_json(options = nil) # :nodoc:
+ # :nodoc:
+ # @api private
+ def as_json(options = nil)
  {name: name} # 5336
  end
  end
 
+ # Base class for all sorted sets which contain jobs, e.g. scheduled, retry and dead.
+ # Sidekiq Pro and Enterprise add additional sorted sets which do not contain job data,
+ # e.g. Batches.
  class JobSet < SortedSet
- def schedule(timestamp, message)
+ # Add a job with the associated timestamp to this set.
+ # @param timestamp [Time] the score for the job
+ # @param job [Hash] the job data
+ def schedule(timestamp, job)
  Sidekiq.redis do |conn|
- conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
+ conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(job))
  end
  end
 
@@ -647,6 +701,10 @@ module Sidekiq
  ##
  # Fetch jobs that match a given time or Range. Job ID is an
  # optional second argument.
+ #
+ # @param score [Time,Range] a specific timestamp or range
+ # @param jid [String, optional] find a specific JID within the score
+ # @return [Array<SortedEntry>] any results found, can be empty
  def fetch(score, jid = nil)
  begin_score, end_score =
  if score.is_a?(Range)
@@ -668,7 +726,10 @@ module Sidekiq
 
  ##
  # Find the job with the given JID within this sorted set.
- # This is a slower O(n) operation. Do not use for app logic.
+ # *This is a slow O(n) operation*. Do not use for app logic.
+ #
+ # @param jid [String] the job identifier
+ # @return [SortedEntry] the record or nil
  def find_job(jid)
  Sidekiq.redis do |conn|
  conn.zscan_each(name, match: "*#{jid}*", count: 100) do |entry, score|
@@ -680,6 +741,8 @@ module Sidekiq
  nil
  end
 
+ # :nodoc:
+ # @api private
  def delete_by_value(name, value)
  Sidekiq.redis do |conn|
  ret = conn.zrem(name, value)
@@ -688,6 +751,8 @@ module Sidekiq
  end
  end
 
+ # :nodoc:
+ # @api private
  def delete_by_jid(score, jid)
  Sidekiq.redis do |conn|
  elements = conn.zrangebyscore(name, score, score)
@@ -708,10 +773,10 @@ module Sidekiq
  end
 
  ##
- # Allows enumeration of scheduled jobs within Sidekiq.
+ # The set of scheduled jobs within Sidekiq.
  # Based on this, you can search/filter for jobs. Here's an
- # example where I'm selecting all jobs of a certain type
- # and deleting them from the schedule queue.
+ # example where I'm selecting jobs based on some complex logic
+ # and deleting them from the scheduled set.
  #
  # r = Sidekiq::ScheduledSet.new
  # r.select do |scheduled|
@@ -726,7 +791,7 @@ module Sidekiq
  end
 
  ##
- # Allows enumeration of retries within Sidekiq.
+ # The set of retries within Sidekiq.
  # Based on this, you can search/filter for jobs. Here's an
  # example where I'm selecting all jobs of a certain type
  # and deleting them from the retry queue.
@@ -742,23 +807,29 @@ module Sidekiq
  super "retry"
  end
 
+ # Enqueues all jobs pending within the retry set.
  def retry_all
  each(&:retry) while size > 0
  end
 
+ # Kills all jobs pending within the retry set.
  def kill_all
  each(&:kill) while size > 0
  end
  end
 
  ##
- # Allows enumeration of dead jobs within Sidekiq.
+ # The set of dead jobs within Sidekiq. Dead jobs have failed all of
+ # their retries and are helding in this set pending some sort of manual
+ # fix. They will be removed after 6 months (dead_timeout) if not.
  #
  class DeadSet < JobSet
  def initialize
  super "dead"
  end
 
+ # Add the given job to the Dead set.
+ # @param message [String] the job data as JSON
  def kill(message, opts = {})
  now = Time.now.to_f
  Sidekiq.redis do |conn|
@@ -780,14 +851,19 @@ module Sidekiq
  true
  end
 
+ # Enqueue all dead jobs
  def retry_all
  each(&:retry) while size > 0
  end
 
+ # The maximum size of the Dead set. Older entries will be trimmed
+ # to stay within this limit. Default value is 10,000.
  def self.max_jobs
  Sidekiq[:dead_max_jobs]
  end
 
+ # The time limit for entries within the Dead set. Older entries will be thrown away.
+ # Default value is six months.
  def self.timeout
  Sidekiq[:dead_timeout_in_seconds]
  end
@@ -798,18 +874,23 @@ module Sidekiq
  # right now. Each process sends a heartbeat to Redis every 5 seconds
  # so this set should be relatively accurate, barring network partitions.
  #
- # Yields a Sidekiq::Process.
+ # @yieldparam [Sidekiq::Process]
  #
  class ProcessSet
  include Enumerable
 
+ # :nodoc:
+ # @api private
  def initialize(clean_plz = true)
  cleanup if clean_plz
  end
 
  # Cleans up dead processes recorded in Redis.
  # Returns the number of processes cleaned.
+ # :nodoc:
+ # @api private
  def cleanup
+ return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
  count = 0
  Sidekiq.redis do |conn|
  procs = conn.sscan_each("processes").to_a.sort
@@ -863,6 +944,7 @@ module Sidekiq
  # based on current heartbeat. #each does that and ensures the set only
  # contains Sidekiq processes which have sent a heartbeat within the last
  # 60 seconds.
+ # @return [Integer] current number of registered Sidekiq processes
  def size
  Sidekiq.redis { |conn| conn.scard("processes") }
  end
@@ -870,10 +952,12 @@ module Sidekiq
  # Total number of threads available to execute jobs.
  # For Sidekiq Enterprise customers this number (in production) must be
  # less than or equal to your licensed concurrency.
+ # @return [Integer] the sum of process concurrency
  def total_concurrency
  sum { |x| x["concurrency"].to_i }
  end
 
+ # @return [Integer] total amount of RSS memory consumed by Sidekiq processes
  def total_rss_in_kb
  sum { |x| x["rss"].to_i }
  end
@@ -882,6 +966,8 @@ module Sidekiq
  # Returns the identity of the current cluster leader or "" if no leader.
  # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq
  # or Sidekiq Pro.
+ # @return [String] Identity of cluster leader
+ # @return [String] empty string if no leader
  def leader
  @leader ||= begin
  x = Sidekiq.redis { |c| c.get("dear-leader") }
@@ -908,6 +994,8 @@ module Sidekiq
  # 'identity' => <unique string identifying the process>,
  # }
  class Process
+ # :nodoc:
+ # @api private
  def initialize(hash)
  @attribs = hash
  end
@@ -932,18 +1020,31 @@ module Sidekiq
  self["queues"]
  end
 
+ # Signal this process to stop processing new jobs.
+ # It will continue to execute jobs it has already fetched.
+ # This method is *asynchronous* and it can take 5-10
+ # seconds for the process to quiet.
  def quiet!
  signal("TSTP")
  end
 
+ # Signal this process to shutdown.
+ # It will shutdown within its configured :timeout value, default 25 seconds.
+ # This method is *asynchronous* and it can take 5-10
+ # seconds for the process to start shutting down.
  def stop!
  signal("TERM")
  end
 
+ # Signal this process to log backtraces for all threads.
+ # Useful if you have a frozen or deadlocked process which is
+ # still sending a heartbeat.
+ # This method is *asynchronous* and it can take 5-10 seconds.
  def dump_threads
  signal("TTIN")
  end
 
+ # @return [Boolean] true if this process is quiet or shutting down
  def stopping?
  self["quiet"] == "true"
  end
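Taken together, the API documented above can be exercised like this; a small sketch where the job class name and JID are placeholders:

```ruby
require "sidekiq/api"

# O(1) counters, fetched via fetch_stats_fast! under the hood
stats = Sidekiq::Stats.new
puts "processed=#{stats.processed} failed=#{stats.failed}"

# SCAN-based filtering of the retry set; yields Sidekiq::SortedEntry objects
Sidekiq::RetrySet.new.scan("WelcomeEmailJob") do |entry|
  entry.kill if entry.item["error_class"] == "ActiveRecord::RecordNotFound"
end

# find_job is a slow O(n) scan; returns a SortedEntry or nil
job = Sidekiq::DeadSet.new.find_job("abcdef1234567890")
job&.retry

# Ask every registered process to quiet (asynchronous, takes a few seconds)
Sidekiq::ProcessSet.new.each(&:quiet!)
```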
data/lib/sidekiq/cli.rb CHANGED
@@ -426,3 +426,4 @@ module Sidekiq # :nodoc:
  end
 
  require "sidekiq/systemd"
+ require "sidekiq/metrics/tracking"
@@ -47,6 +47,7 @@ module Sidekiq
  end
 
  def fire_event(event, options = {})
+ oneshot = options.fetch(:oneshot, true)
  reverse = options[:reverse]
  reraise = options[:reraise]
 
@@ -58,7 +59,7 @@ module Sidekiq
  handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
  raise ex if reraise
  end
- arr.clear # once we've fired an event, we never fire it again
+ arr.clear if oneshot # once we've fired an event, we never fire it again
  end
  end
  end
@@ -91,7 +91,7 @@ module Sidekiq
 
  msg = Sidekiq.load_json(jobstr)
  if msg["retry"]
- attempt_retry(nil, msg, queue, e)
+ process_retry(nil, msg, queue, e)
  else
  Sidekiq.death_handlers.each do |handler|
  handler.call(msg, e)
@@ -128,7 +128,7 @@ module Sidekiq
  end
 
  raise e unless msg["retry"]
- attempt_retry(jobinst, msg, queue, e)
+ process_retry(jobinst, msg, queue, e)
  # We've handled this error associated with this job, don't
  # need to handle it at the global level
  raise Skip
@@ -139,7 +139,7 @@ module Sidekiq
  # Note that +jobinst+ can be nil here if an error is raised before we can
  # instantiate the job instance. All access must be guarded and
  # best effort.
- def attempt_retry(jobinst, msg, queue, exception)
+ def process_retry(jobinst, msg, queue, exception)
  max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
  msg["queue"] = (msg["retry_queue"] || queue)
@@ -170,19 +170,50 @@ module Sidekiq
  msg["error_backtrace"] = compress_backtrace(lines)
  end
 
- if count < max_retry_attempts
- delay = delay_for(jobinst, count, exception)
- # Logging here can break retries if the logging device raises ENOSPC #3979
- # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
- retry_at = Time.now.to_f + delay
- payload = Sidekiq.dump_json(msg)
- redis do |conn|
- conn.zadd("retry", retry_at.to_s, payload)
- end
+ # Goodbye dear message, you (re)tried your best I'm sure.
+ return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+ strategy, delay = delay_for(jobinst, count, exception)
+ case strategy
+ when :discard
+ return # poof!
+ when :kill
+ return retries_exhausted(jobinst, msg, exception)
+ end
+
+ # Logging here can break retries if the logging device raises ENOSPC #3979
+ # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+ jitter = rand(10) * (count + 1)
+ retry_at = Time.now.to_f + delay + jitter
+ payload = Sidekiq.dump_json(msg)
+ redis do |conn|
+ conn.zadd("retry", retry_at.to_s, payload)
+ end
+ end
+
+ # returns (strategy, seconds)
+ def delay_for(jobinst, count, exception)
+ rv = begin
+ # sidekiq_retry_in can return two different things:
+ # 1. When to retry next, as an integer of seconds
+ # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+ jobinst&.sidekiq_retry_in_block&.call(count, exception)
+ rescue Exception => e
+ handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+ nil
+ end
+
+ delay = if Integer === rv && rv > 0
+ rv
+ elsif rv == :discard
+ return [:discard, nil] # do nothing, job goes poof
+ elsif rv == :kill
+ return [:kill, nil]
  else
- # Goodbye dear message, you (re)tried your best I'm sure.
- retries_exhausted(jobinst, msg, exception)
+ (count**4) + 15
  end
+
+ [:default, delay]
  end
 
  def retries_exhausted(jobinst, msg, exception)
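For reference, the `else` branch above keeps Sidekiq's default backoff of `(count ** 4) + 15` seconds, with `rand(10) * (count + 1)` of jitter added back in `process_retry`. A quick sketch of the resulting schedule (jitter omitted):

```ruby
# Default retry delays implied by the code above: (count ** 4) + 15 seconds.
(0..5).each do |count|
  puts "retry ##{count + 1}: #{(count**4) + 15}s"
end
# => 15s, 16s, 31s, 96s, 271s, 640s (before jitter)
```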
@@ -216,22 +247,6 @@ module Sidekiq
  end
  end
 
- def delay_for(jobinst, count, exception)
- jitter = rand(10) * (count + 1)
- if jobinst&.sidekiq_retry_in_block
- custom_retry_in = retry_in(jobinst, count, exception).to_i
- return custom_retry_in + jitter if custom_retry_in > 0
- end
- (count**4) + 15 + jitter
- end
-
- def retry_in(jobinst, count, exception)
- jobinst.sidekiq_retry_in_block.call(count, exception)
- rescue Exception => e
- handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
- nil
- end
-
  def exception_caused_by_shutdown?(e, checked_causes = [])
  return false unless e.cause
 
@@ -79,6 +79,8 @@ module Sidekiq
  end
 
  def clear_heartbeat
+ flush_stats
+
  # Remove record from Redis since we are shutting down.
  # Note we don't stop the heartbeat thread; if the process
  # doesn't actually exit, it'll reappear in the Web UI.
@@ -98,7 +100,7 @@ module Sidekiq
 
  end
 
- def self.flush_stats
+ def flush_stats
  fails = Processor::FAILURE.reset
  procd = Processor::PROCESSED.reset
  return if fails + procd == 0
@@ -122,7 +124,6 @@ module Sidekiq
  Sidekiq.logger.warn("Unable to flush stats: #{ex}")
  end
  end
- at_exit(&method(:flush_stats))
 
  def ❤
  key = identity
@@ -179,6 +180,7 @@ module Sidekiq
 
  # first heartbeat or recovering from an outage and need to reestablish our heartbeat
  fire_event(:heartbeat) unless exists
+ fire_event(:beat, oneshot: false)
 
  return unless msg
 
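The `:beat` event fired above is new here and, because it is fired with `oneshot: false`, its handlers are not cleared after the first call; they run on every heartbeat. A hedged sketch of subscribing to it from server configuration, assuming the existing lifecycle-event registration API accepts the new event name:

```ruby
# Sketch: subscribing to the repeating :beat lifecycle event.
# Unlike :startup/:shutdown (oneshot), this block runs on every heartbeat.
Sidekiq.configure_server do |config|
  config.on(:beat) do
    Sidekiq.logger.debug { "heartbeat at #{Time.now.utc}" }
  end
end
```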
@@ -0,0 +1,47 @@
+ require "sidekiq"
+ require "date"
+
+ # This file is designed to be required within the user's
+ # deployment script; it should need a bare minimum of dependencies.
+ #
+ # require "sidekiq/metrics/deploy"
+ # gitdesc = `git log -1 --format="%h %s"`.strip
+ # d = Sidekiq::Metrics::Deploy.new
+ # d.mark(label: gitdesc)
+ #
+ # Note that you cannot mark more than once per minute. This is a feature, not a bug.
+ module Sidekiq
+ module Metrics
+ class Deploy
+ MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+ def initialize(pool = Sidekiq.redis_pool)
+ @pool = pool
+ end
+
+ def mark(at: Time.now, label: "")
+ # we need to round the timestamp so that we gracefully
+ # handle an excepted common error in marking deploys:
+ # having every process mark its deploy, leading
+ # to N marks for each deploy. Instead we round the time
+ # to the minute so that multple marks within that minute
+ # will all naturally rollup into one mark per minute.
+ whence = at.utc
+ floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+ datecode = floor.strftime("%Y%m%d")
+ key = "#{datecode}-marks"
+ @pool.with do |c|
+ c.pipelined do |pipe|
+ pipe.hsetnx(key, floor.rfc3339, label)
+ pipe.expire(key, MARK_TTL)
+ end
+ end
+ end
+
+ def fetch(date = Time.now.utc.to_date)
+ datecode = date.strftime("%Y%m%d")
+ @pool.with { |c| c.hgetall("#{datecode}-marks") }
+ end
+ end
+ end
+ end
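Following the usage comment at the top of the new file, marking a deploy and reading the marks back might look like this in a deployment script (the git invocation mirrors the file's own example):

```ruby
# Sketch of using the new Sidekiq::Metrics::Deploy class from a deploy script.
require "sidekiq/metrics/deploy"

gitdesc = `git log -1 --format="%h %s"`.strip
deploy = Sidekiq::Metrics::Deploy.new
deploy.mark(label: gitdesc)   # rounded to the minute and deduplicated via HSETNX

# Fetch today's marks: a Hash of RFC 3339 timestamp => label
p deploy.fetch(Time.now.utc.to_date)
```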